author     Jens Axboe <axboe@kernel.dk>    2013-04-02 04:04:39 -0400
committer  Jens Axboe <axboe@kernel.dk>    2013-04-02 04:04:39 -0400
commit     64f8de4da7d3962632f152d3d702d68bb8accc29
tree       c90a872a6d91c824635d59572e1e578980f4bc98 /drivers/s390
parent     f1fb3449efd5c49b48e35746bc7283eb9c73e3a0
parent     b5c872ddb7083c7909fb76a170c3807e04564bb3
Merge branch 'writeback-workqueue' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq into for-3.10/core
Tejun writes:
-----
This is the pull request for the earlier patchset[1] with the same
name. It's only three patches (the first one was committed to
workqueue tree) but the merge strategy is a bit involved due to the
dependencies.
* Because the conversion needs features from wq/for-3.10,
block/for-3.10/core is based on rc3, and wq/for-3.10 has conflicts
with rc3, I pulled mainline (rc5) into wq/for-3.10 to prevent those
workqueue conflicts from flaring up in block tree.
* Resolving the issue that Jan and Dave raised about debugging
requires arch-wide changes. The patchset is being worked on[2] but
it'll have to go through -mm after these changes show up in -next,
and not included in this pull request.
The three commits are located in the following git branch.
git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git writeback-workqueue
Pulling it into block/for-3.10/core produces a conflict in
drivers/md/raid5.c between the following two commits.
e3620a3ad5 ("MD RAID5: Avoid accessing gendisk or queue structs when not available")
2f6db2a707 ("raid5: use bio_reset()")
The conflict is trivial - one removes an "if ()" conditional while the
other removes "rbi->bi_next = NULL" right above it. We just need to
remove both. The merged branch is available at
git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git block-test-merge
so that you can use it for verification. The test merge commit has
proper merge description.
While these changes are a bit of pain to route, they make code simpler
and even have, while minute, measurable performance gain[3] even on a
workload which isn't particularly favorable to showing the benefits of
this conversion.
----
Fixed up the conflict.
Conflicts:
drivers/md/raid5.c
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/s390')
-rw-r--r--   drivers/s390/block/scm_blk.c        |  69
-rw-r--r--   drivers/s390/block/scm_blk.h        |   2
-rw-r--r--   drivers/s390/block/scm_drv.c        |  23
-rw-r--r--   drivers/s390/char/sclp_cmd.c        |   2
-rw-r--r--   drivers/s390/cio/chsc.c             |  17
-rw-r--r--   drivers/s390/cio/chsc.h             |   2
-rw-r--r--   drivers/s390/cio/scm.c              |  18
-rw-r--r--   drivers/s390/net/qeth_core.h        |   1
-rw-r--r--   drivers/s390/net/qeth_core_main.c   |  45
-rw-r--r--   drivers/s390/net/qeth_l3_main.c     |  23
-rw-r--r--   drivers/s390/net/qeth_l3_sys.c      |   2
11 files changed, 175 insertions, 29 deletions
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9978ad4433cb..5ac9c935c151 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = {
         .release = scm_release,
 };
 
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+        return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
 static void scm_request_prepare(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq)
 
         scm_release_cluster(scmrq);
         blk_requeue_request(bdev->rq, scmrq->request);
+        atomic_dec(&bdev->queued_reqs);
         scm_request_done(scmrq);
         scm_ensure_queue_restart(bdev);
 }
 
 void scm_request_finish(struct scm_request *scmrq)
 {
+        struct scm_blk_dev *bdev = scmrq->bdev;
+
         scm_release_cluster(scmrq);
         blk_end_request_all(scmrq->request, scmrq->error);
+        atomic_dec(&bdev->queued_reqs);
         scm_request_done(scmrq);
 }
 
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq)
                 if (req->cmd_type != REQ_TYPE_FS)
                         continue;
 
+                if (!scm_permit_request(bdev, req)) {
+                        scm_ensure_queue_restart(bdev);
+                        return;
+                }
                 scmrq = scm_request_fetch();
                 if (!scmrq) {
                         SCM_LOG(5, "no request");
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq)
                         return;
                 }
                 if (scm_need_cluster_request(scmrq)) {
+                        atomic_inc(&bdev->queued_reqs);
                         blk_start_request(req);
                         scm_initiate_cluster_request(scmrq);
                         return;
                 }
                 scm_request_prepare(scmrq);
+                atomic_inc(&bdev->queued_reqs);
                 blk_start_request(req);
 
                 ret = scm_start_aob(scmrq->aob);
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq)
                         scm_request_requeue(scmrq);
                         return;
                 }
-                atomic_inc(&bdev->queued_reqs);
         }
 }
 
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
         tasklet_hi_schedule(&bdev->tasklet);
 }
 
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        unsigned long flags;
+
+        if (scmrq->error != -EIO)
+                goto restart;
+
+        /* For -EIO the response block is valid. */
+        switch (scmrq->aob->response.eqc) {
+        case EQC_WR_PROHIBIT:
+                spin_lock_irqsave(&bdev->lock, flags);
+                if (bdev->state != SCM_WR_PROHIBIT)
+                        pr_info("%lu: Write access to the SCM increment is suspended\n",
+                                (unsigned long) bdev->scmdev->address);
+                bdev->state = SCM_WR_PROHIBIT;
+                spin_unlock_irqrestore(&bdev->lock, flags);
+                goto requeue;
+        default:
+                break;
+        }
+
+restart:
+        if (!scm_start_aob(scmrq->aob))
+                return;
+
+requeue:
+        spin_lock_irqsave(&bdev->rq_lock, flags);
+        scm_request_requeue(scmrq);
+        spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
 static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 {
         struct scm_request *scmrq;
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                 spin_unlock_irqrestore(&bdev->lock, flags);
 
                 if (scmrq->error && scmrq->retries-- > 0) {
-                        if (scm_start_aob(scmrq->aob)) {
-                                spin_lock_irqsave(&bdev->rq_lock, flags);
-                                scm_request_requeue(scmrq);
-                                spin_unlock_irqrestore(&bdev->rq_lock, flags);
-                        }
+                        scm_blk_handle_error(scmrq);
+
                         /* Request restarted or requeued, handle next. */
                         spin_lock_irqsave(&bdev->lock, flags);
                         continue;
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                 }
 
                 scm_request_finish(scmrq);
-                atomic_dec(&bdev->queued_reqs);
                 spin_lock_irqsave(&bdev->lock, flags);
         }
         spin_unlock_irqrestore(&bdev->lock, flags);
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
         }
 
         bdev->scmdev = scmdev;
+        bdev->state = SCM_OPER;
         spin_lock_init(&bdev->rq_lock);
         spin_lock_init(&bdev->lock);
         INIT_LIST_HEAD(&bdev->finished_requests);
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
         put_disk(bdev->gendisk);
 }
 
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&bdev->lock, flags);
+        if (bdev->state == SCM_WR_PROHIBIT)
+                pr_info("%lu: Write access to the SCM increment is restored\n",
+                        (unsigned long) bdev->scmdev->address);
+        bdev->state = SCM_OPER;
+        spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
 static int __init scm_blk_init(void)
 {
         int ret = -EINVAL;
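Taken together, the scm_blk.c hunks above add a small write-suspend gate: scm_blk_handle_error() moves the device to SCM_WR_PROHIBIT when an -EIO response reports EQC_WR_PROHIBIT, a later availability notification restores SCM_OPER through scm_blk_set_available(), and scm_permit_request() keeps write requests parked on the queue in between. A minimal user-space restatement of that gating check (an illustrative sketch only; plain enums stand in for the real request and block-device structures):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch: restates the gating logic added to scm_blk.c above
 * with plain enums instead of the real struct request / struct scm_blk_dev. */
enum scm_state { SCM_OPER, SCM_WR_PROHIBIT };
enum req_dir   { DIR_READ, DIR_WRITE };

/* Mirrors scm_permit_request(): reads always pass, writes only while the
 * SCM increment is operational. */
static bool permit_request(enum scm_state state, enum req_dir dir)
{
        return dir != DIR_WRITE || state != SCM_WR_PROHIBIT;
}

int main(void)
{
        printf("write while suspended:   %d\n", permit_request(SCM_WR_PROHIBIT, DIR_WRITE)); /* 0 */
        printf("read while suspended:    %d\n", permit_request(SCM_WR_PROHIBIT, DIR_READ));  /* 1 */
        printf("write while operational: %d\n", permit_request(SCM_OPER, DIR_WRITE));        /* 1 */
        return 0;
}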
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 3c1ccf494647..8b387b32fd62 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -21,6 +21,7 @@ struct scm_blk_dev {
         spinlock_t rq_lock;     /* guard the request queue */
         spinlock_t lock;        /* guard the rest of the blockdev */
         atomic_t queued_reqs;
+        enum {SCM_OPER, SCM_WR_PROHIBIT} state;
         struct list_head finished_requests;
 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
         struct list_head cluster_list;
@@ -48,6 +49,7 @@ struct scm_request {
 
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
 void scm_blk_irq(struct scm_device *, void *, int);
 
 void scm_request_finish(struct scm_request *);
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
index 9fa0a908607b..5f6180d6ff08 100644
--- a/drivers/s390/block/scm_drv.c
+++ b/drivers/s390/block/scm_drv.c
@@ -13,12 +13,23 @@
 #include <asm/eadm.h>
 #include "scm_blk.h"
 
-static void notify(struct scm_device *scmdev)
+static void scm_notify(struct scm_device *scmdev, enum scm_event event)
 {
-        pr_info("%lu: The capabilities of the SCM increment changed\n",
-                (unsigned long) scmdev->address);
-        SCM_LOG(2, "State changed");
-        SCM_LOG_STATE(2, scmdev);
+        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+        switch (event) {
+        case SCM_CHANGE:
+                pr_info("%lu: The capabilities of the SCM increment changed\n",
+                        (unsigned long) scmdev->address);
+                SCM_LOG(2, "State changed");
+                SCM_LOG_STATE(2, scmdev);
+                break;
+        case SCM_AVAIL:
+                SCM_LOG(2, "Increment available");
+                SCM_LOG_STATE(2, scmdev);
+                scm_blk_set_available(bdev);
+                break;
+        }
 }
 
 static int scm_probe(struct scm_device *scmdev)
@@ -64,7 +75,7 @@ static struct scm_driver scm_drv = {
                 .name = "scm_block",
                 .owner = THIS_MODULE,
         },
-        .notify = notify,
+        .notify = scm_notify,
         .probe = scm_probe,
         .remove = scm_remove,
         .handler = scm_blk_irq,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 30a2255389e5..cd798386b622 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -627,6 +627,8 @@ static int __init sclp_detect_standby_memory(void)
         struct read_storage_sccb *sccb;
         int i, id, assigned, rc;
 
+        if (OLDMEM_BASE) /* No standby memory in kdump mode */
+                return 0;
         if (!early_read_info_sccb_valid)
                 return 0;
         if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 31ceef1beb8b..e16c553f6556 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
                               " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+        int ret;
+
+        CIO_CRW_EVENT(4, "chsc: scm available information\n");
+        if (sei_area->rs != 7)
+                return;
+
+        ret = scm_process_availability_information();
+        if (ret)
+                CIO_CRW_EVENT(0, "chsc: process availability information"
+                              " failed (rc=%d).\n", ret);
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
         switch (sei_area->cc) {
@@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
         case 12: /* scm change notification */
                 chsc_process_sei_scm_change(sei_area);
                 break;
+        case 14: /* scm available notification */
+                chsc_process_sei_scm_avail(sei_area);
+                break;
         default: /* other stuff */
                 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
                               sei_area->cc);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 227e05f674b3..349d5fc47196 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
 
 #ifdef CONFIG_SCM_BUS
 int scm_update_information(void);
+int scm_process_availability_information(void);
 #else /* CONFIG_SCM_BUS */
 static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
 #endif /* CONFIG_SCM_BUS */
 
 
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index bcf20f3aa51b..46ec25632e8b 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
                 goto out;
         scmdrv = to_scm_drv(scmdev->dev.driver);
         if (changed && scmdrv->notify)
-                scmdrv->notify(scmdev);
+                scmdrv->notify(scmdev, SCM_CHANGE);
 out:
         device_unlock(&scmdev->dev);
         if (changed)
@@ -297,6 +297,22 @@ int scm_update_information(void)
         return ret;
 }
 
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+        struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+        struct scm_device *scmdev = to_scm_dev(dev);
+
+        if (dev->driver && scmdrv->notify)
+                scmdrv->notify(scmdev, SCM_AVAIL);
+
+        return 0;
+}
+
+int scm_process_availability_information(void)
+{
+        return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
 static int __init scm_init(void)
 {
         int ret;
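The chsc.c, chsc.h and scm.c hunks above wire a new machine event into the existing driver notification path: the SEI handler recognizes content code 14, calls scm_process_availability_information(), and that walks the SCM bus with bus_for_each_dev(), invoking each bound driver's ->notify() callback with SCM_AVAIL (which scm_drv.c then turns into scm_blk_set_available()). A compressed, hypothetical model of that fan-out; the array loop and the simplified structs stand in for bus_for_each_dev() and the real scm_driver/scm_device types:

#include <stdio.h>

/* Simplified stand-ins for the kernel types; illustrative only. */
enum scm_event { SCM_CHANGE, SCM_AVAIL };

struct scm_device { const char *name; };
struct scm_driver { void (*notify)(struct scm_device *, enum scm_event); };

static void scm_notify(struct scm_device *scmdev, enum scm_event event)
{
        /* The real callback logs the event and, for SCM_AVAIL, calls
         * scm_blk_set_available() to re-enable writes. */
        printf("%s: %s\n", scmdev->name,
               event == SCM_AVAIL ? "increment available" : "capabilities changed");
}

static struct scm_driver drv = { .notify = scm_notify };
static struct scm_device devs[] = { { "scm0" }, { "scm1" } };

/* Models scm_process_availability_information(): notify every bound device. */
static int process_availability_information(void)
{
        for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
                if (drv.notify)
                        drv.notify(&devs[i], SCM_AVAIL);
        return 0;
}

int main(void)
{
        return process_availability_information();
}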
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d87961d4c0de..8c0622399fcd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -916,6 +916,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
         void *reply_param);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
+int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
         struct sk_buff *, struct qeth_hdr *, int, int, int);
 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0d8cdff81813..0d73a999983d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3679,6 +3679,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 
+int qeth_get_elements_for_frags(struct sk_buff *skb)
+{
+        int cnt, length, e, elements = 0;
+        struct skb_frag_struct *frag;
+        char *data;
+
+        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+                frag = &skb_shinfo(skb)->frags[cnt];
+                data = (char *)page_to_phys(skb_frag_page(frag)) +
+                        frag->page_offset;
+                length = frag->size;
+                e = PFN_UP((unsigned long)data + length - 1) -
+                        PFN_DOWN((unsigned long)data);
+                elements += e;
+        }
+        return elements;
+}
+EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+
 int qeth_get_elements_no(struct qeth_card *card, void *hdr,
         struct sk_buff *skb, int elems)
 {
@@ -3686,7 +3705,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
         int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
                 PFN_DOWN((unsigned long)skb->data);
 
-        elements_needed += skb_shinfo(skb)->nr_frags;
+        elements_needed += qeth_get_elements_for_frags(skb);
+
         if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
                 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
                         "(Number=%d / Length=%d). Discarded.\n",
@@ -3771,12 +3791,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 
         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
                 frag = &skb_shinfo(skb)->frags[cnt];
-                buffer->element[element].addr = (char *)
-                        page_to_phys(skb_frag_page(frag))
-                        + frag->page_offset;
-                buffer->element[element].length = frag->size;
-                buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
-                element++;
+                data = (char *)page_to_phys(skb_frag_page(frag)) +
+                        frag->page_offset;
+                length = frag->size;
+                while (length > 0) {
+                        length_here = PAGE_SIZE -
+                                ((unsigned long) data % PAGE_SIZE);
+                        if (length < length_here)
+                                length_here = length;
+
+                        buffer->element[element].addr = data;
+                        buffer->element[element].length = length_here;
+                        buffer->element[element].eflags =
+                                SBAL_EFLAGS_MIDDLE_FRAG;
+                        length -= length_here;
+                        data += length_here;
+                        element++;
+                }
         }
 
         if (buffer->element[element - 1].eflags)
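The qeth_core_main.c hunks above stop assuming one buffer element per fragment: __qeth_fill_buffer() now splits fragment data at page boundaries, and qeth_get_elements_for_frags() counts how many pages each fragment touches so that qeth_get_elements_no() (and qeth_l3_tso_elements() below) budget enough elements. A standalone illustration of that arithmetic; this is a user-space sketch in which PAGE_SIZE, PFN_UP and PFN_DOWN are local stand-ins for the kernel macros:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PFN_DOWN(x) ((x) / PAGE_SIZE)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

/* Same arithmetic as qeth_get_elements_for_frags(): the number of buffer
 * elements a fragment needs equals the number of pages it touches. */
static unsigned long elements_for_range(unsigned long addr, unsigned long len)
{
        return PFN_UP(addr + len - 1) - PFN_DOWN(addr);
}

int main(void)
{
        /* A 6000-byte fragment starting 3000 bytes into a page touches three
         * pages, so it needs three elements although nr_frags counts it once. */
        printf("%lu\n", elements_for_range(3000, 6000)); /* 3 */
        /* A fragment contained in a single page still needs only one element. */
        printf("%lu\n", elements_for_range(4096, 512));  /* 1 */
        return 0;
}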
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 091ca0efa1c5..8710337dab3e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
         return rc;
 }
 
-static void qeth_l3_correct_routing_type(struct qeth_card *card,
+static int qeth_l3_correct_routing_type(struct qeth_card *card,
         enum qeth_routing_types *type, enum qeth_prot_versions prot)
 {
         if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
                 case PRIMARY_CONNECTOR:
                 case SECONDARY_CONNECTOR:
                 case MULTICAST_ROUTER:
-                        return;
+                        return 0;
                 default:
                         goto out_inval;
                 }
@@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
                 case NO_ROUTER:
                 case PRIMARY_ROUTER:
                 case SECONDARY_ROUTER:
-                        return;
+                        return 0;
                 case MULTICAST_ROUTER:
                         if (qeth_is_ipafunc_supported(card, prot,
                                                       IPA_OSA_MC_ROUTER))
-                                return;
+                                return 0;
                 default:
                         goto out_inval;
                 }
         }
 out_inval:
         *type = NO_ROUTER;
+        return -EINVAL;
 }
 
 int qeth_l3_setrouting_v4(struct qeth_card *card)
@@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
 
         QETH_CARD_TEXT(card, 3, "setrtg4");
 
-        qeth_l3_correct_routing_type(card, &card->options.route4.type,
+        rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
                                   QETH_PROT_IPV4);
+        if (rc)
+                return rc;
 
         rc = qeth_l3_send_setrouting(card, card->options.route4.type,
                                   QETH_PROT_IPV4);
@@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 
         if (!qeth_is_supported(card, IPA_IPV6))
                 return 0;
-        qeth_l3_correct_routing_type(card, &card->options.route6.type,
+        rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
                                   QETH_PROT_IPV6);
+        if (rc)
+                return rc;
 
         rc = qeth_l3_send_setrouting(card, card->options.route6.type,
                                   QETH_PROT_IPV6);
@@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
                 tcp_hdr(skb)->doff * 4;
         int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
         int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
-        elements += skb_shinfo(skb)->nr_frags;
+
+        elements += qeth_get_elements_for_frags(skb);
+
         return elements;
 }
 
@@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                 rc = -ENODEV;
                 goto out_remove;
         }
-        qeth_trace_features(card);
 
         if (!card->dev && qeth_l3_setup_netdev(card)) {
                 rc = -ENODEV;
@@ -3425,6 +3431,7 @@ contin:
                 qeth_l3_set_multicast_list(card->dev);
                 rtnl_unlock();
         }
+        qeth_trace_features(card);
         /* let user_space know that device is online */
         kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
         mutex_unlock(&card->conf_mutex);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index ebc379486267..e70af2406ff9 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
                         rc = qeth_l3_setrouting_v6(card);
         }
 out:
+        if (rc)
+                route->type = old_route_type;
         mutex_unlock(&card->conf_mutex);
         return rc ? rc : count;
 }
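The qeth_l3 changes make routing validation report failure instead of silently downgrading: qeth_l3_correct_routing_type() now returns -EINVAL for unsupported types, qeth_l3_setrouting_v4()/_v6() propagate that error, and qeth_l3_dev_route_store() rolls the option back (old_route_type is saved in the part of that function not shown in the hunk). A reduced, hypothetical model of this validate-and-roll-back flow, with plain enums in place of the driver types and -22 standing in for -EINVAL:

#include <stdio.h>

/* Hypothetical, reduced model of the flow described above; not driver code. */
enum routing_type { NO_ROUTER, PRIMARY_ROUTER, MULTICAST_ROUTER, BAD_TYPE };

/* Mirrors the reworked qeth_l3_correct_routing_type(): reject unknown types. */
static int correct_routing_type(enum routing_type *type)
{
        switch (*type) {
        case NO_ROUTER:
        case PRIMARY_ROUTER:
        case MULTICAST_ROUTER:
                return 0;
        default:
                *type = NO_ROUTER;
                return -22; /* stands in for -EINVAL */
        }
}

/* Mirrors the sysfs store path: apply, validate, restore on failure. */
static int route_store(enum routing_type *current_type, enum routing_type new_type)
{
        enum routing_type old_type = *current_type;
        int rc;

        *current_type = new_type;
        rc = correct_routing_type(current_type);
        if (rc)
                *current_type = old_type; /* roll back, as in the hunk above */
        return rc;
}

int main(void)
{
        enum routing_type type = NO_ROUTER;
        int rc;

        rc = route_store(&type, PRIMARY_ROUTER);
        printf("rc=%d type=%d\n", rc, type); /* rc=0 type=1 */

        rc = route_store(&type, BAD_TYPE);
        printf("rc=%d type=%d\n", rc, type); /* rc=-22 type=1 (restored) */
        return 0;
}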