Diffstat (limited to 'fs'): 200 files changed, 4245 insertions(+), 2243 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377cdda2b..83eab52fb3f6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
 	bool "Direct Access (DAX) support"
 	depends on MMU
 	depends on !(ARM || MIPS || SPARC)
+	select FS_IOMAP
 	help
 	  Direct Access (DAX) can be used on memory-backed block devices.
 	  If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 1e9d2f84e5b5..b29447e03ede 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -343,7 +343,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
 	 * had callbacks entirely, and the server will call us later to break
 	 * them
 	 */
-	afs_fs_give_up_callbacks(server, &afs_async_call);
+	afs_fs_give_up_callbacks(server, true);
 }
 
 /*
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index d764236072b1..2edbdcbf6432 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -24,65 +24,86 @@ static int afs_deliver_cb_callback(struct afs_call *);
 static int afs_deliver_cb_probe_uuid(struct afs_call *);
 static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *);
 static void afs_cm_destructor(struct afs_call *);
+static void SRXAFSCB_CallBack(struct work_struct *);
+static void SRXAFSCB_InitCallBackState(struct work_struct *);
+static void SRXAFSCB_Probe(struct work_struct *);
+static void SRXAFSCB_ProbeUuid(struct work_struct *);
+static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
+
+#define CM_NAME(name) \
+	const char afs_SRXCB##name##_name[] __tracepoint_string = \
+		"CB." #name
 
 /*
  * CB.CallBack operation type
  */
+static CM_NAME(CallBack);
 static const struct afs_call_type afs_SRXCBCallBack = {
-	.name		= "CB.CallBack",
+	.name		= afs_SRXCBCallBack_name,
 	.deliver	= afs_deliver_cb_callback,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
+	.work		= SRXAFSCB_CallBack,
 };
 
 /*
  * CB.InitCallBackState operation type
  */
+static CM_NAME(InitCallBackState);
 static const struct afs_call_type afs_SRXCBInitCallBackState = {
-	.name		= "CB.InitCallBackState",
+	.name		= afs_SRXCBInitCallBackState_name,
 	.deliver	= afs_deliver_cb_init_call_back_state,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
+	.work		= SRXAFSCB_InitCallBackState,
 };
 
 /*
  * CB.InitCallBackState3 operation type
  */
+static CM_NAME(InitCallBackState3);
 static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
-	.name		= "CB.InitCallBackState3",
+	.name		= afs_SRXCBInitCallBackState3_name,
 	.deliver	= afs_deliver_cb_init_call_back_state3,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
+	.work		= SRXAFSCB_InitCallBackState,
 };
 
 /*
  * CB.Probe operation type
  */
+static CM_NAME(Probe);
 static const struct afs_call_type afs_SRXCBProbe = {
-	.name		= "CB.Probe",
+	.name		= afs_SRXCBProbe_name,
 	.deliver	= afs_deliver_cb_probe,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
+	.work		= SRXAFSCB_Probe,
 };
 
 /*
  * CB.ProbeUuid operation type
  */
+static CM_NAME(ProbeUuid);
 static const struct afs_call_type afs_SRXCBProbeUuid = {
-	.name		= "CB.ProbeUuid",
+	.name		= afs_SRXCBProbeUuid_name,
 	.deliver	= afs_deliver_cb_probe_uuid,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
+	.work		= SRXAFSCB_ProbeUuid,
 };
 
 /*
  * CB.TellMeAboutYourself operation type
  */
+static CM_NAME(TellMeAboutYourself);
 static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
-	.name		= "CB.TellMeAboutYourself",
+	.name		= afs_SRXCBTellMeAboutYourself_name,
 	.deliver	= afs_deliver_cb_tell_me_about_yourself,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
+	.work		= SRXAFSCB_TellMeAboutYourself,
 };
 
 /*
@@ -153,6 +174,7 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
 	afs_send_empty_reply(call);
 
 	afs_break_callbacks(call->server, call->count, call->request);
+	afs_put_call(call);
 	_leave("");
 }
 
@@ -274,9 +296,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 		return -ENOTCONN;
 	call->server = server;
 
-	INIT_WORK(&call->work, SRXAFSCB_CallBack);
-	queue_work(afs_wq, &call->work);
-	return 0;
+	return afs_queue_call_work(call);
 }
 
 /*
@@ -290,6 +310,7 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
 
 	afs_init_callback_state(call->server);
 	afs_send_empty_reply(call);
+	afs_put_call(call);
 	_leave("");
 }
 
@@ -320,9 +341,7 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
 		return -ENOTCONN;
 	call->server = server;
 
-	INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-	queue_work(afs_wq, &call->work);
-	return 0;
+	return afs_queue_call_work(call);
 }
 
 /*
@@ -332,7 +351,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 {
 	struct sockaddr_rxrpc srx;
 	struct afs_server *server;
-	struct afs_uuid *r;
+	struct uuid_v1 *r;
 	unsigned loop;
 	__be32 *b;
 	int ret;
@@ -362,15 +381,15 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 	}
 
 	_debug("unmarshall UUID");
-	call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+	call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
 	if (!call->request)
 		return -ENOMEM;
 
 	b = call->buffer;
 	r = call->request;
-	r->time_low = ntohl(b[0]);
-	r->time_mid = ntohl(b[1]);
-	r->time_hi_and_version = ntohl(b[2]);
+	r->time_low = b[0];
+	r->time_mid = htons(ntohl(b[1]));
+	r->time_hi_and_version = htons(ntohl(b[2]));
 	r->clock_seq_hi_and_reserved = ntohl(b[3]);
 	r->clock_seq_low = ntohl(b[4]);
 
@@ -394,9 +413,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 		return -ENOTCONN;
 	call->server = server;
 
-	INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-	queue_work(afs_wq, &call->work);
-	return 0;
+	return afs_queue_call_work(call);
 }
 
 /*
@@ -408,6 +425,7 @@ static void SRXAFSCB_Probe(struct work_struct *work)
 
 	_enter("");
 	afs_send_empty_reply(call);
+	afs_put_call(call);
 	_leave("");
 }
 
@@ -427,9 +445,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 	/* no unmarshalling required */
 	call->state = AFS_CALL_REPLYING;
 
-	INIT_WORK(&call->work, SRXAFSCB_Probe);
-	queue_work(afs_wq, &call->work);
-	return 0;
+	return afs_queue_call_work(call);
 }
 
 /*
@@ -438,7 +454,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 {
 	struct afs_call *call = container_of(work, struct afs_call, work);
-	struct afs_uuid *r = call->request;
+	struct uuid_v1 *r = call->request;
 
 	struct {
 		__be32 match;
@@ -452,6 +468,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 	reply.match = htonl(1);
 
 	afs_send_simple_reply(call, &reply, sizeof(reply));
+	afs_put_call(call);
 	_leave("");
 }
 
@@ -460,7 +477,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
  */
 static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 {
-	struct afs_uuid *r;
+	struct uuid_v1 *r;
 	unsigned loop;
 	__be32 *b;
 	int ret;
@@ -486,15 +503,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 	}
 
 	_debug("unmarshall UUID");
-	call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+	call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
 	if (!call->request)
 		return -ENOMEM;
 
 	b = call->buffer;
 	r = call->request;
-	r->time_low = ntohl(b[0]);
-	r->time_mid = ntohl(b[1]);
-	r->time_hi_and_version = ntohl(b[2]);
+	r->time_low = b[0];
+	r->time_mid = htons(ntohl(b[1]));
+	r->time_hi_and_version = htons(ntohl(b[2]));
 	r->clock_seq_hi_and_reserved = ntohl(b[3]);
 	r->clock_seq_low = ntohl(b[4]);
 
@@ -510,9 +527,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 
 	call->state = AFS_CALL_REPLYING;
 
-	INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
-	queue_work(afs_wq, &call->work);
-	return 0;
+	return afs_queue_call_work(call);
 }
 
 /*
@@ -554,9 +569,9 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
 	memset(&reply, 0, sizeof(reply));
 	reply.ia.nifs = htonl(nifs);
 
-	reply.ia.uuid[0] = htonl(afs_uuid.time_low);
-	reply.ia.uuid[1] = htonl(afs_uuid.time_mid);
-	reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version);
+	reply.ia.uuid[0] = afs_uuid.time_low;
+	reply.ia.uuid[1] = htonl(ntohs(afs_uuid.time_mid));
+	reply.ia.uuid[2] = htonl(ntohs(afs_uuid.time_hi_and_version));
 	reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved);
 	reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low);
 	for (loop = 0; loop < 6; loop++)
@@ -574,7 +589,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
 	reply.cap.capcount = htonl(1);
 	reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
 	afs_send_simple_reply(call, &reply, sizeof(reply));
-
+	afs_put_call(call);
 	_leave("");
 }
 
@@ -594,7 +609,5 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
 	/* no unmarshalling required */
 	call->state = AFS_CALL_REPLYING;
 
-	INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself);
-	queue_work(afs_wq, &call->work);
-	return 0;
+	return afs_queue_call_work(call);
 }
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 6344aee4ac4b..ba7b71fba34b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -16,6 +16,7 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/gfp.h>
+#include <linux/task_io_accounting_ops.h>
 #include "internal.h"
 
 static int afs_readpage(struct file *file, struct page *page);
@@ -101,6 +102,21 @@ int afs_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+/*
+ * Dispose of a ref to a read record.
+ */
+void afs_put_read(struct afs_read *req)
+{
+	int i;
+
+	if (atomic_dec_and_test(&req->usage)) {
+		for (i = 0; i < req->nr_pages; i++)
+			if (req->pages[i])
+				put_page(req->pages[i]);
+		kfree(req);
+	}
+}
+
 #ifdef CONFIG_AFS_FSCACHE
 /*
  * deal with notification that a page was read from the cache
@@ -126,9 +142,8 @@ int afs_page_filler(void *data, struct page *page)
 {
 	struct inode *inode = page->mapping->host;
 	struct afs_vnode *vnode = AFS_FS_I(inode);
+	struct afs_read *req;
 	struct key *key = data;
-	size_t len;
-	off_t offset;
 	int ret;
 
 	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
@@ -164,12 +179,23 @@ int afs_page_filler(void *data, struct page *page)
 		_debug("cache said ENOBUFS");
 	default:
 	go_on:
-		offset = page->index << PAGE_SHIFT;
-		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
+		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
+			      GFP_KERNEL);
+		if (!req)
+			goto enomem;
+
+		atomic_set(&req->usage, 1);
+		req->pos = (loff_t)page->index << PAGE_SHIFT;
+		req->len = min_t(size_t, i_size_read(inode) - req->pos,
+				 PAGE_SIZE);
+		req->nr_pages = 1;
+		req->pages[0] = page;
+		get_page(page);
 
 		/* read the contents of the file from the server into the
 		 * page */
-		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
+		ret = afs_vnode_fetch_data(vnode, key, req);
+		afs_put_read(req);
 		if (ret < 0) {
 			if (ret == -ENOENT) {
 				_debug("got NOENT from server"
@@ -201,6 +227,8 @@ int afs_page_filler(void *data, struct page *page)
 	_leave(" = 0");
 	return 0;
 
+enomem:
+	ret = -ENOMEM;
 error:
 	SetPageError(page);
 	unlock_page(page);
@@ -235,6 +263,131 @@ static int afs_readpage(struct file *file, struct page *page)
 }
 
 /*
+ * Make pages available as they're filled.
+ */
+static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
+{
+#ifdef CONFIG_AFS_FSCACHE
+	struct afs_vnode *vnode = call->reply;
+#endif
+	struct page *page = req->pages[req->index];
+
+	req->pages[req->index] = NULL;
+	SetPageUptodate(page);
+
+	/* send the page to the cache */
+#ifdef CONFIG_AFS_FSCACHE
+	if (PageFsCache(page) &&
+	    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
+		fscache_uncache_page(vnode->cache, page);
+		BUG_ON(PageFsCache(page));
+	}
+#endif
+	unlock_page(page);
+	put_page(page);
+}
+
+/*
+ * Read a contiguous set of pages.
+ */
+static int afs_readpages_one(struct file *file, struct address_space *mapping,
+			     struct list_head *pages)
+{
+	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+	struct afs_read *req;
+	struct list_head *p;
+	struct page *first, *page;
+	struct key *key = file->private_data;
+	pgoff_t index;
+	int ret, n, i;
+
+	/* Count the number of contiguous pages at the front of the list.  Note
+	 * that the list goes prev-wards rather than next-wards.
+	 */
+	first = list_entry(pages->prev, struct page, lru);
+	index = first->index + 1;
+	n = 1;
+	for (p = first->lru.prev; p != pages; p = p->prev) {
+		page = list_entry(p, struct page, lru);
+		if (page->index != index)
+			break;
+		index++;
+		n++;
+	}
+
+	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
+		      GFP_NOFS);
+	if (!req)
+		return -ENOMEM;
+
+	atomic_set(&req->usage, 1);
+	req->page_done = afs_readpages_page_done;
+	req->pos = first->index;
+	req->pos <<= PAGE_SHIFT;
+
+	/* Transfer the pages to the request.  We add them in until one fails
+	 * to add to the LRU and then we stop (as that'll make a hole in the
+	 * contiguous run.
+	 *
+	 * Note that it's possible for the file size to change whilst we're
+	 * doing this, but we rely on the server returning less than we asked
+	 * for if the file shrank.  We also rely on this to deal with a partial
+	 * page at the end of the file.
+	 */
+	do {
+		page = list_entry(pages->prev, struct page, lru);
+		list_del(&page->lru);
+		index = page->index;
+		if (add_to_page_cache_lru(page, mapping, index,
+					  readahead_gfp_mask(mapping))) {
+#ifdef CONFIG_AFS_FSCACHE
+			fscache_uncache_page(vnode->cache, page);
+#endif
+			put_page(page);
+			break;
+		}
+
+		req->pages[req->nr_pages++] = page;
+		req->len += PAGE_SIZE;
+	} while (req->nr_pages < n);
+
+	if (req->nr_pages == 0) {
+		kfree(req);
+		return 0;
+	}
+
+	ret = afs_vnode_fetch_data(vnode, key, req);
+	if (ret < 0)
+		goto error;
+
+	task_io_account_read(PAGE_SIZE * req->nr_pages);
+	afs_put_read(req);
+	return 0;
+
+error:
+	if (ret == -ENOENT) {
+		_debug("got NOENT from server"
+		       " - marking file deleted and stale");
+		set_bit(AFS_VNODE_DELETED, &vnode->flags);
+		ret = -ESTALE;
+	}
+
+	for (i = 0; i < req->nr_pages; i++) {
+		page = req->pages[i];
+		if (page) {
+#ifdef CONFIG_AFS_FSCACHE
+			fscache_uncache_page(vnode->cache, page);
+#endif
+			SetPageError(page);
+			unlock_page(page);
+		}
+	}
+
+	afs_put_read(req);
+	return ret;
+}
+
+/*
  * read a set of pages
  */
 static int afs_readpages(struct file *file, struct address_space *mapping,
@@ -287,8 +440,11 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
 		return ret;
 	}
 
-	/* load the missing pages from the network */
-	ret = read_cache_pages(mapping, pages, afs_page_filler, key);
+	while (!list_empty(pages)) {
+		ret = afs_readpages_one(file, mapping, pages);
+		if (ret < 0)
+			break;
+	}
 
 	_leave(" = %d [netting]", ret);
 	return ret;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 31c616ab9b40..ac8e766978dc 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -275,7 +275,7 @@ int afs_fs_fetch_file_status(struct afs_server *server,
 			     struct key *key,
 			     struct afs_vnode *vnode,
 			     struct afs_volsync *volsync,
-			     const struct afs_wait_mode *wait_mode)
+			     bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -300,7 +300,7 @@ int afs_fs_fetch_file_status(struct afs_server *server,
 	bp[2] = htonl(vnode->fid.vnode);
 	bp[3] = htonl(vnode->fid.unique);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -309,15 +309,19 @@ int afs_fs_fetch_file_status(struct afs_server *server,
 static int afs_deliver_fs_fetch_data(struct afs_call *call)
 {
 	struct afs_vnode *vnode = call->reply;
+	struct afs_read *req = call->reply3;
 	const __be32 *bp;
-	struct page *page;
+	unsigned int size;
 	void *buffer;
 	int ret;
 
-	_enter("{%u}", call->unmarshall);
+	_enter("{%u,%zu/%u;%u/%llu}",
+	       call->unmarshall, call->offset, call->count,
+	       req->remain, req->actual_len);
 
 	switch (call->unmarshall) {
 	case 0:
+		req->actual_len = 0;
 		call->offset = 0;
 		call->unmarshall++;
 		if (call->operation_ID != FSFETCHDATA64) {
@@ -334,10 +338,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 		if (ret < 0)
 			return ret;
 
-		call->count = ntohl(call->tmp);
-		_debug("DATA length MSW: %u", call->count);
-		if (call->count > 0)
-			return -EBADMSG;
+		req->actual_len = ntohl(call->tmp);
+		req->actual_len <<= 32;
 		call->offset = 0;
 		call->unmarshall++;
 
@@ -349,26 +351,52 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 		if (ret < 0)
 			return ret;
 
-		call->count = ntohl(call->tmp);
-		_debug("DATA length: %u", call->count);
-		if (call->count > PAGE_SIZE)
+		req->actual_len |= ntohl(call->tmp);
+		_debug("DATA length: %llu", req->actual_len);
+		/* Check that the server didn't want to send us extra. We
+		 * might want to just discard instead, but that requires
+		 * cooperation from AF_RXRPC.
+		 */
+		if (req->actual_len > req->len)
 			return -EBADMSG;
-		call->offset = 0;
+
+		req->remain = req->actual_len;
+		call->offset = req->pos & (PAGE_SIZE - 1);
+		req->index = 0;
+		if (req->actual_len == 0)
+			goto no_more_data;
 		call->unmarshall++;
 
+	begin_page:
+		if (req->remain > PAGE_SIZE - call->offset)
+			size = PAGE_SIZE - call->offset;
+		else
+			size = req->remain;
+		call->count = call->offset + size;
+		ASSERTCMP(call->count, <=, PAGE_SIZE);
+		req->remain -= size;
+
 		/* extract the returned data */
 	case 3:
-		_debug("extract data");
-		if (call->count > 0) {
-			page = call->reply3;
-			buffer = kmap(page);
-			ret = afs_extract_data(call, buffer,
-					       call->count, true);
-			kunmap(page);
-			if (ret < 0)
-				return ret;
+		_debug("extract data %u/%llu %zu/%u",
+		       req->remain, req->actual_len, call->offset, call->count);
+
+		buffer = kmap(req->pages[req->index]);
+		ret = afs_extract_data(call, buffer, call->count, true);
+		kunmap(req->pages[req->index]);
+		if (ret < 0)
+			return ret;
+		if (call->offset == PAGE_SIZE) {
+			if (req->page_done)
+				req->page_done(call, req);
+			if (req->remain > 0) {
+				req->index++;
+				call->offset = 0;
+				goto begin_page;
+			}
 		}
 
+	no_more_data:
 		call->offset = 0;
 		call->unmarshall++;
 
@@ -393,17 +421,25 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 	}
 
 	if (call->count < PAGE_SIZE) {
-		_debug("clear");
-		page = call->reply3;
-		buffer = kmap(page);
+		buffer = kmap(req->pages[req->index]);
 		memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-		kunmap(page);
+		kunmap(req->pages[req->index]);
+		if (req->page_done)
+			req->page_done(call, req);
 	}
 
 	_leave(" = 0 [done]");
 	return 0;
 }
 
+static void afs_fetch_data_destructor(struct afs_call *call)
+{
+	struct afs_read *req = call->reply3;
+
+	afs_put_read(req);
+	afs_flat_call_destructor(call);
+}
+
 /*
  * FS.FetchData operation type
  */
@@ -411,14 +447,14 @@ static const struct afs_call_type afs_RXFSFetchData = {
 	.name		= "FS.FetchData",
 	.deliver	= afs_deliver_fs_fetch_data,
 	.abort_to_error	= afs_abort_to_error,
-	.destructor	= afs_flat_call_destructor,
+	.destructor	= afs_fetch_data_destructor,
 };
 
 static const struct afs_call_type afs_RXFSFetchData64 = {
 	.name		= "FS.FetchData64",
 	.deliver	= afs_deliver_fs_fetch_data,
 	.abort_to_error	= afs_abort_to_error,
-	.destructor	= afs_flat_call_destructor,
+	.destructor	= afs_fetch_data_destructor,
 };
 
 /*
@@ -427,17 +463,14 @@ static const struct afs_call_type afs_RXFSFetchData64 = {
 static int afs_fs_fetch_data64(struct afs_server *server,
 			       struct key *key,
 			       struct afs_vnode *vnode,
-			       off_t offset, size_t length,
-			       struct page *buffer,
-			       const struct afs_wait_mode *wait_mode)
+			       struct afs_read *req,
+			       bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
 
 	_enter("");
 
-	ASSERTCMP(length, <, ULONG_MAX);
-
 	call = afs_alloc_flat_call(&afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
 	if (!call)
 		return -ENOMEM;
@@ -445,7 +478,7 @@ static int afs_fs_fetch_data64(struct afs_server *server,
 	call->key = key;
 	call->reply = vnode;
 	call->reply2 = NULL; /* volsync */
-	call->reply3 = buffer;
+	call->reply3 = req;
 	call->service_id = FS_SERVICE;
 	call->port = htons(AFS_FS_PORT);
 	call->operation_ID = FSFETCHDATA64;
@@ -456,12 +489,13 @@ static int afs_fs_fetch_data64(struct afs_server *server,
 	bp[1] = htonl(vnode->fid.vid);
 	bp[2] = htonl(vnode->fid.vnode);
 	bp[3] = htonl(vnode->fid.unique);
-	bp[4] = htonl(upper_32_bits(offset));
-	bp[5] = htonl((u32) offset);
+	bp[4] = htonl(upper_32_bits(req->pos));
+	bp[5] = htonl(lower_32_bits(req->pos));
 	bp[6] = 0;
-	bp[7] = htonl((u32) length);
+	bp[7] = htonl(lower_32_bits(req->len));
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	atomic_inc(&req->usage);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -470,16 +504,16 @@ static int afs_fs_fetch_data64(struct afs_server *server,
 int afs_fs_fetch_data(struct afs_server *server,
 		      struct key *key,
 		      struct afs_vnode *vnode,
-		      off_t offset, size_t length,
-		      struct page *buffer,
-		      const struct afs_wait_mode *wait_mode)
+		      struct afs_read *req,
+		      bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
 
-	if (upper_32_bits(offset) || upper_32_bits(offset + length))
-		return afs_fs_fetch_data64(server, key, vnode, offset, length,
-					   buffer, wait_mode);
+	if (upper_32_bits(req->pos) ||
+	    upper_32_bits(req->len) ||
+	    upper_32_bits(req->pos + req->len))
+		return afs_fs_fetch_data64(server, key, vnode, req, async);
 
 	_enter("");
 
@@ -490,7 +524,7 @@ int afs_fs_fetch_data(struct afs_server *server,
 	call->key = key;
 	call->reply = vnode;
 	call->reply2 = NULL; /* volsync */
-	call->reply3 = buffer;
+	call->reply3 = req;
 	call->service_id = FS_SERVICE;
 	call->port = htons(AFS_FS_PORT);
 	call->operation_ID = FSFETCHDATA;
@@ -501,10 +535,11 @@ int afs_fs_fetch_data(struct afs_server *server,
 	bp[1] = htonl(vnode->fid.vid);
 	bp[2] = htonl(vnode->fid.vnode);
 	bp[3] = htonl(vnode->fid.unique);
-	bp[4] = htonl(offset);
-	bp[5] = htonl(length);
+	bp[4] = htonl(lower_32_bits(req->pos));
+	bp[5] = htonl(lower_32_bits(req->len));
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	atomic_inc(&req->usage);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -533,7 +568,7 @@ static const struct afs_call_type afs_RXFSGiveUpCallBacks = {
  * - the callbacks are held in the server->cb_break ring
 */
 int afs_fs_give_up_callbacks(struct afs_server *server,
-			     const struct afs_wait_mode *wait_mode)
+			     bool async)
 {
 	struct afs_call *call;
 	size_t ncallbacks;
@@ -587,7 +622,7 @@ int afs_fs_give_up_callbacks(struct afs_server *server,
 	ASSERT(ncallbacks > 0);
 	wake_up_nr(&server->cb_break_waitq, ncallbacks);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -638,7 +673,7 @@ int afs_fs_create(struct afs_server *server,
 		  struct afs_fid *newfid,
 		  struct afs_file_status *newstatus,
 		  struct afs_callback *newcb,
-		  const struct afs_wait_mode *wait_mode)
+		  bool async)
 {
 	struct afs_call *call;
 	size_t namesz, reqsz, padsz;
@@ -683,7 +718,7 @@ int afs_fs_create(struct afs_server *server,
 	*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
 	*bp++ = 0; /* segment size */
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -728,7 +763,7 @@ int afs_fs_remove(struct afs_server *server,
 		  struct afs_vnode *vnode,
 		  const char *name,
 		  bool isdir,
-		  const struct afs_wait_mode *wait_mode)
+		  bool async)
 {
 	struct afs_call *call;
 	size_t namesz, reqsz, padsz;
@@ -763,7 +798,7 @@ int afs_fs_remove(struct afs_server *server,
 		bp = (void *) bp + padsz;
 	}
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -809,7 +844,7 @@ int afs_fs_link(struct afs_server *server,
 		struct afs_vnode *dvnode,
 		struct afs_vnode *vnode,
 		const char *name,
-		const struct afs_wait_mode *wait_mode)
+		bool async)
 {
 	struct afs_call *call;
 	size_t namesz, reqsz, padsz;
@@ -848,7 +883,7 @@ int afs_fs_link(struct afs_server *server,
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -897,7 +932,7 @@ int afs_fs_symlink(struct afs_server *server,
 		   const char *contents,
 		   struct afs_fid *newfid,
 		   struct afs_file_status *newstatus,
-		   const struct afs_wait_mode *wait_mode)
+		   bool async)
 {
 	struct afs_call *call;
 	size_t namesz, reqsz, padsz, c_namesz, c_padsz;
@@ -952,7 +987,7 @@ int afs_fs_symlink(struct afs_server *server,
 	*bp++ = htonl(S_IRWXUGO); /* unix mode */
 	*bp++ = 0; /* segment size */
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1001,7 +1036,7 @@ int afs_fs_rename(struct afs_server *server,
 		  const char *orig_name,
 		  struct afs_vnode *new_dvnode,
 		  const char *new_name,
-		  const struct afs_wait_mode *wait_mode)
+		  bool async)
 {
 	struct afs_call *call;
 	size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
@@ -1055,7 +1090,7 @@ int afs_fs_rename(struct afs_server *server,
 		bp = (void *) bp + n_padsz;
 	}
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1110,7 +1145,7 @@ static int afs_fs_store_data64(struct afs_server *server,
 			       pgoff_t first, pgoff_t last,
 			       unsigned offset, unsigned to,
 			       loff_t size, loff_t pos, loff_t i_size,
-			       const struct afs_wait_mode *wait_mode)
+			       bool async)
 {
 	struct afs_vnode *vnode = wb->vnode;
 	struct afs_call *call;
@@ -1159,7 +1194,7 @@ static int afs_fs_store_data64(struct afs_server *server,
 	*bp++ = htonl(i_size >> 32);
 	*bp++ = htonl((u32) i_size);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1168,7 +1203,7 @@ static int afs_fs_store_data64(struct afs_server *server,
 int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
 		      pgoff_t first, pgoff_t last,
 		      unsigned offset, unsigned to,
-		      const struct afs_wait_mode *wait_mode)
+		      bool async)
 {
 	struct afs_vnode *vnode = wb->vnode;
 	struct afs_call *call;
@@ -1194,7 +1229,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
 
 	if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32)
 		return afs_fs_store_data64(server, wb, first, last, offset, to,
-					   size, pos, i_size, wait_mode);
+					   size, pos, i_size, async);
 
 	call = afs_alloc_flat_call(&afs_RXFSStoreData,
 				   (4 + 6 + 3) * 4,
@@ -1233,7 +1268,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
 	*bp++ = htonl(size);
 	*bp++ = htonl(i_size);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1295,7 +1330,7 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
 */
 static int afs_fs_setattr_size64(struct afs_server *server, struct key *key,
 				 struct afs_vnode *vnode, struct iattr *attr,
-				 const struct afs_wait_mode *wait_mode)
+				 bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -1334,7 +1369,7 @@ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key,
 	*bp++ = htonl(attr->ia_size >> 32); /* new file length */
 	*bp++ = htonl((u32) attr->ia_size);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1343,7 +1378,7 @@ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key,
 */
 static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
 			       struct afs_vnode *vnode, struct iattr *attr,
-			       const struct afs_wait_mode *wait_mode)
+			       bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -1354,7 +1389,7 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
 	ASSERT(attr->ia_valid & ATTR_SIZE);
 	if (attr->ia_size >> 32)
 		return afs_fs_setattr_size64(server, key, vnode, attr,
-					     wait_mode);
+					     async);
 
 	call = afs_alloc_flat_call(&afs_RXFSStoreData_as_Status,
 				   (4 + 6 + 3) * 4,
@@ -1382,7 +1417,7 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
 	*bp++ = 0; /* size of write */
 	*bp++ = htonl(attr->ia_size); /* new file length */
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1391,14 +1426,14 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
 */
 int afs_fs_setattr(struct afs_server *server, struct key *key,
 		   struct afs_vnode *vnode, struct iattr *attr,
-		   const struct afs_wait_mode *wait_mode)
+		   bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
 
 	if (attr->ia_valid & ATTR_SIZE)
 		return afs_fs_setattr_size(server, key, vnode, attr,
-					   wait_mode);
+					   async);
 
 	_enter(",%x,{%x:%u},,",
 	       key_serial(key), vnode->fid.vid, vnode->fid.vnode);
@@ -1424,7 +1459,7 @@ int afs_fs_setattr(struct afs_server *server, struct key *key,
 
 	xdr_encode_AFS_StoreStatus(&bp, attr);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1626,7 +1661,7 @@ int afs_fs_get_volume_status(struct afs_server *server,
 			     struct key *key,
 			     struct afs_vnode *vnode,
 			     struct afs_volume_status *vs,
-			     const struct afs_wait_mode *wait_mode)
+			     bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -1656,7 +1691,7 @@ int afs_fs_get_volume_status(struct afs_server *server,
 	bp[0] = htonl(FSGETVOLUMESTATUS);
 	bp[1] = htonl(vnode->fid.vid);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1718,7 +1753,7 @@ int afs_fs_set_lock(struct afs_server *server,
 		    struct key *key,
 		    struct afs_vnode *vnode,
 		    afs_lock_type_t type,
-		    const struct afs_wait_mode *wait_mode)
+		    bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -1742,7 +1777,7 @@ int afs_fs_set_lock(struct afs_server *server,
 	*bp++ = htonl(vnode->fid.unique);
 	*bp++ = htonl(type);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1751,7 +1786,7 @@ int afs_fs_set_lock(struct afs_server *server,
 int afs_fs_extend_lock(struct afs_server *server,
 		       struct key *key,
 		       struct afs_vnode *vnode,
-		       const struct afs_wait_mode *wait_mode)
+		       bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -1774,7 +1809,7 @@ int afs_fs_extend_lock(struct afs_server *server,
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1783,7 +1818,7 @@ int afs_fs_extend_lock(struct afs_server *server,
 int afs_fs_release_lock(struct afs_server *server,
 			struct key *key,
 			struct afs_vnode *vnode,
-			const struct afs_wait_mode *wait_mode)
+			bool async)
 {
 	struct afs_call *call;
 	__be32 *bp;
@@ -1806,5 +1841,5 @@ int afs_fs_release_lock(struct afs_server *server,
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+	return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 535a38d2c1d0..8acf3670e756 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/fscache.h>
 #include <linux/backing-dev.h>
+#include <linux/uuid.h>
 #include <net/af_rxrpc.h>
 
 #include "afs.h"
@@ -51,31 +52,22 @@ struct afs_mount_params {
 	struct key		*key;		/* key to use for secure mounting */
 };
 
-/*
- * definition of how to wait for the completion of an operation
- */
-struct afs_wait_mode {
-	/* RxRPC received message notification */
-	rxrpc_notify_rx_t notify_rx;
-
-	/* synchronous call waiter and call dispatched notification */
-	int (*wait)(struct afs_call *call);
-
-	/* asynchronous call completion */
-	void (*async_complete)(void *reply, int error);
+enum afs_call_state {
+	AFS_CALL_REQUESTING,	/* request is being sent for outgoing call */
+	AFS_CALL_AWAIT_REPLY,	/* awaiting reply to outgoing call */
+	AFS_CALL_AWAIT_OP_ID,	/* awaiting op ID on incoming call */
+	AFS_CALL_AWAIT_REQUEST,	/* awaiting request data on incoming call */
+	AFS_CALL_REPLYING,	/* replying to incoming call */
+	AFS_CALL_AWAIT_ACK,	/* awaiting final ACK of incoming call */
+	AFS_CALL_COMPLETE,	/* Completed or failed */
 };
-
-extern const struct afs_wait_mode afs_sync_call;
-extern const struct afs_wait_mode afs_async_call;
-
 /*
  * a record of an in-progress RxRPC call
 */
 struct afs_call {
 	const struct afs_call_type *type;	/* type of call */
-	const struct afs_wait_mode *wait_mode;	/* completion wait mode */
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
-	struct work_struct	async_work;	/* asynchronous work processor */
+	struct work_struct	async_work;	/* async I/O processor */
 	struct work_struct	work;		/* actual work processor */
 	struct rxrpc_call	*rxcall;	/* RxRPC call handle */
 	struct key		*key;		/* security for this call */
@@ -91,15 +83,8 @@ struct afs_call {
 	pgoff_t			first;		/* first page in mapping to deal with */
 	pgoff_t			last;		/* last page in mapping to deal with */
 	size_t			offset;		/* offset into received data store */
-	enum {					/* call state */
-		AFS_CALL_REQUESTING,	/* request is being sent for outgoing call */
-		AFS_CALL_AWAIT_REPLY,	/* awaiting reply to outgoing call */
-		AFS_CALL_AWAIT_OP_ID,	/* awaiting op ID on incoming call */
-		AFS_CALL_AWAIT_REQUEST,	/* awaiting request data on incoming call */
-		AFS_CALL_REPLYING,	/* replying to incoming call */
-		AFS_CALL_AWAIT_ACK,	/* awaiting final ACK of incoming call */
-		AFS_CALL_COMPLETE,	/* Completed or failed */
-	} state;
+	atomic_t		usage;
+	enum afs_call_state	state;
 	int			error;		/* error code */
 	u32			abort_code;	/* Remote abort ID or 0 */
 	unsigned		request_size;	/* size of request data */
@@ -110,6 +95,7 @@ struct afs_call {
 	bool			incoming;	/* T if incoming call */
 	bool			send_pages;	/* T if data from mapping should be sent */
 	bool			need_attention;	/* T if RxRPC poked us */
+	bool			async;		/* T if asynchronous */
 	u16			service_id;	/* RxRPC service ID to call */
 	__be16			port;		/* target UDP port */
 	u32			operation_ID;	/* operation ID for an incoming call */
| @@ -131,6 +117,25 @@ struct afs_call_type { | |||
| 131 | 117 | ||
| 132 | /* clean up a call */ | 118 | /* clean up a call */ |
| 133 | void (*destructor)(struct afs_call *call); | 119 | void (*destructor)(struct afs_call *call); |
| 120 | |||
| 121 | /* Work function */ | ||
| 122 | void (*work)(struct work_struct *work); | ||
| 123 | }; | ||
| 124 | |||
| 125 | /* | ||
| 126 | * Record of an outstanding read operation on a vnode. | ||
| 127 | */ | ||
| 128 | struct afs_read { | ||
| 129 | loff_t pos; /* Where to start reading */ | ||
| 130 | loff_t len; /* How much to read */ | ||
| 131 | loff_t actual_len; /* How much we're actually getting */ | ||
| 132 | atomic_t usage; | ||
| 133 | unsigned int remain; /* Amount remaining */ | ||
| 134 | unsigned int index; /* Which page we're reading into */ | ||
| 135 | unsigned int pg_offset; /* Offset in page we're at */ | ||
| 136 | unsigned int nr_pages; | ||
| 137 | void (*page_done)(struct afs_call *, struct afs_read *); | ||
| 138 | struct page *pages[]; | ||
| 134 | }; | 139 | }; |
| 135 | 140 | ||
| 136 | /* | 141 | /* |
| @@ -403,30 +408,6 @@ struct afs_interface { | |||
| 403 | unsigned mtu; /* MTU of interface */ | 408 | unsigned mtu; /* MTU of interface */ |
| 404 | }; | 409 | }; |
| 405 | 410 | ||
| 406 | /* | ||
| 407 | * UUID definition [internet draft] | ||
| 408 | * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns | ||
| 409 | * increments since midnight 15th October 1582 | ||
| 410 | * - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID | ||
| 411 | * time | ||
| 412 | * - the clock sequence is a 14-bit counter to avoid duplicate times | ||
| 413 | */ | ||
| 414 | struct afs_uuid { | ||
| 415 | u32 time_low; /* low part of timestamp */ | ||
| 416 | u16 time_mid; /* mid part of timestamp */ | ||
| 417 | u16 time_hi_and_version; /* high part of timestamp and version */ | ||
| 418 | #define AFS_UUID_TO_UNIX_TIME 0x01b21dd213814000ULL | ||
| 419 | #define AFS_UUID_TIMEHI_MASK 0x0fff | ||
| 420 | #define AFS_UUID_VERSION_TIME 0x1000 /* time-based UUID */ | ||
| 421 | #define AFS_UUID_VERSION_NAME 0x3000 /* name-based UUID */ | ||
| 422 | #define AFS_UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */ | ||
| 423 | u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */ | ||
| 424 | #define AFS_UUID_CLOCKHI_MASK 0x3f | ||
| 425 | #define AFS_UUID_VARIANT_STD 0x80 | ||
| 426 | u8 clock_seq_low; /* clock seq low */ | ||
| 427 | u8 node[6]; /* spatially unique node ID (MAC addr) */ | ||
| 428 | }; | ||
| 429 | |||
| 430 | /*****************************************************************************/ | 411 | /*****************************************************************************/ |
| 431 | /* | 412 | /* |
| 432 | * cache.c | 413 | * cache.c |
| @@ -494,6 +475,7 @@ extern const struct file_operations afs_file_operations; | |||
| 494 | extern int afs_open(struct inode *, struct file *); | 475 | extern int afs_open(struct inode *, struct file *); |
| 495 | extern int afs_release(struct inode *, struct file *); | 476 | extern int afs_release(struct inode *, struct file *); |
| 496 | extern int afs_page_filler(void *, struct page *); | 477 | extern int afs_page_filler(void *, struct page *); |
| 478 | extern void afs_put_read(struct afs_read *); | ||
| 497 | 479 | ||
| 498 | /* | 480 | /* |
| 499 | * flock.c | 481 | * flock.c |
| @@ -509,50 +491,37 @@ extern int afs_flock(struct file *, int, struct file_lock *); | |||
| 509 | */ | 491 | */ |
| 510 | extern int afs_fs_fetch_file_status(struct afs_server *, struct key *, | 492 | extern int afs_fs_fetch_file_status(struct afs_server *, struct key *, |
| 511 | struct afs_vnode *, struct afs_volsync *, | 493 | struct afs_vnode *, struct afs_volsync *, |
| 512 | const struct afs_wait_mode *); | 494 | bool); |
| 513 | extern int afs_fs_give_up_callbacks(struct afs_server *, | 495 | extern int afs_fs_give_up_callbacks(struct afs_server *, bool); |
| 514 | const struct afs_wait_mode *); | ||
| 515 | extern int afs_fs_fetch_data(struct afs_server *, struct key *, | 496 | extern int afs_fs_fetch_data(struct afs_server *, struct key *, |
| 516 | struct afs_vnode *, off_t, size_t, struct page *, | 497 | struct afs_vnode *, struct afs_read *, bool); |
| 517 | const struct afs_wait_mode *); | ||
| 518 | extern int afs_fs_create(struct afs_server *, struct key *, | 498 | extern int afs_fs_create(struct afs_server *, struct key *, |
| 519 | struct afs_vnode *, const char *, umode_t, | 499 | struct afs_vnode *, const char *, umode_t, |
| 520 | struct afs_fid *, struct afs_file_status *, | 500 | struct afs_fid *, struct afs_file_status *, |
| 521 | struct afs_callback *, | 501 | struct afs_callback *, bool); |
| 522 | const struct afs_wait_mode *); | ||
| 523 | extern int afs_fs_remove(struct afs_server *, struct key *, | 502 | extern int afs_fs_remove(struct afs_server *, struct key *, |
| 524 | struct afs_vnode *, const char *, bool, | 503 | struct afs_vnode *, const char *, bool, bool); |
| 525 | const struct afs_wait_mode *); | ||
| 526 | extern int afs_fs_link(struct afs_server *, struct key *, struct afs_vnode *, | 504 | extern int afs_fs_link(struct afs_server *, struct key *, struct afs_vnode *, |
| 527 | struct afs_vnode *, const char *, | 505 | struct afs_vnode *, const char *, bool); |
| 528 | const struct afs_wait_mode *); | ||
| 529 | extern int afs_fs_symlink(struct afs_server *, struct key *, | 506 | extern int afs_fs_symlink(struct afs_server *, struct key *, |
| 530 | struct afs_vnode *, const char *, const char *, | 507 | struct afs_vnode *, const char *, const char *, |
| 531 | struct afs_fid *, struct afs_file_status *, | 508 | struct afs_fid *, struct afs_file_status *, bool); |
| 532 | const struct afs_wait_mode *); | ||
| 533 | extern int afs_fs_rename(struct afs_server *, struct key *, | 509 | extern int afs_fs_rename(struct afs_server *, struct key *, |
| 534 | struct afs_vnode *, const char *, | 510 | struct afs_vnode *, const char *, |
| 535 | struct afs_vnode *, const char *, | 511 | struct afs_vnode *, const char *, bool); |
| 536 | const struct afs_wait_mode *); | ||
| 537 | extern int afs_fs_store_data(struct afs_server *, struct afs_writeback *, | 512 | extern int afs_fs_store_data(struct afs_server *, struct afs_writeback *, |
| 538 | pgoff_t, pgoff_t, unsigned, unsigned, | 513 | pgoff_t, pgoff_t, unsigned, unsigned, bool); |
| 539 | const struct afs_wait_mode *); | ||
| 540 | extern int afs_fs_setattr(struct afs_server *, struct key *, | 514 | extern int afs_fs_setattr(struct afs_server *, struct key *, |
| 541 | struct afs_vnode *, struct iattr *, | 515 | struct afs_vnode *, struct iattr *, bool); |
| 542 | const struct afs_wait_mode *); | ||
| 543 | extern int afs_fs_get_volume_status(struct afs_server *, struct key *, | 516 | extern int afs_fs_get_volume_status(struct afs_server *, struct key *, |
| 544 | struct afs_vnode *, | 517 | struct afs_vnode *, |
| 545 | struct afs_volume_status *, | 518 | struct afs_volume_status *, bool); |
| 546 | const struct afs_wait_mode *); | ||
| 547 | extern int afs_fs_set_lock(struct afs_server *, struct key *, | 519 | extern int afs_fs_set_lock(struct afs_server *, struct key *, |
| 548 | struct afs_vnode *, afs_lock_type_t, | 520 | struct afs_vnode *, afs_lock_type_t, bool); |
| 549 | const struct afs_wait_mode *); | ||
| 550 | extern int afs_fs_extend_lock(struct afs_server *, struct key *, | 521 | extern int afs_fs_extend_lock(struct afs_server *, struct key *, |
| 551 | struct afs_vnode *, | 522 | struct afs_vnode *, bool); |
| 552 | const struct afs_wait_mode *); | ||
| 553 | extern int afs_fs_release_lock(struct afs_server *, struct key *, | 523 | extern int afs_fs_release_lock(struct afs_server *, struct key *, |
| 554 | struct afs_vnode *, | 524 | struct afs_vnode *, bool); |
| 555 | const struct afs_wait_mode *); | ||
| 556 | 525 | ||
| 557 | /* | 526 | /* |
| 558 | * inode.c | 527 | * inode.c |
| @@ -573,7 +542,7 @@ extern int afs_drop_inode(struct inode *); | |||
| 573 | * main.c | 542 | * main.c |
| 574 | */ | 543 | */ |
| 575 | extern struct workqueue_struct *afs_wq; | 544 | extern struct workqueue_struct *afs_wq; |
| 576 | extern struct afs_uuid afs_uuid; | 545 | extern struct uuid_v1 afs_uuid; |
| 577 | 546 | ||
| 578 | /* | 547 | /* |
| 579 | * misc.c | 548 | * misc.c |
| @@ -592,6 +561,11 @@ extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); | |||
| 592 | extern void afs_mntpt_kill_timer(void); | 561 | extern void afs_mntpt_kill_timer(void); |
| 593 | 562 | ||
| 594 | /* | 563 | /* |
| 564 | * netdevices.c | ||
| 565 | */ | ||
| 566 | extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool); | ||
| 567 | |||
| 568 | /* | ||
| 595 | * proc.c | 569 | * proc.c |
| 596 | */ | 570 | */ |
| 597 | extern int afs_proc_init(void); | 571 | extern int afs_proc_init(void); |
| @@ -603,11 +577,13 @@ extern void afs_proc_cell_remove(struct afs_cell *); | |||
| 603 | * rxrpc.c | 577 | * rxrpc.c |
| 604 | */ | 578 | */ |
| 605 | extern struct socket *afs_socket; | 579 | extern struct socket *afs_socket; |
| 580 | extern atomic_t afs_outstanding_calls; | ||
| 606 | 581 | ||
| 607 | extern int afs_open_socket(void); | 582 | extern int afs_open_socket(void); |
| 608 | extern void afs_close_socket(void); | 583 | extern void afs_close_socket(void); |
| 609 | extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, | 584 | extern void afs_put_call(struct afs_call *); |
| 610 | const struct afs_wait_mode *); | 585 | extern int afs_queue_call_work(struct afs_call *); |
| 586 | extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, bool); | ||
| 611 | extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, | 587 | extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, |
| 612 | size_t, size_t); | 588 | size_t, size_t); |
| 613 | extern void afs_flat_call_destructor(struct afs_call *); | 589 | extern void afs_flat_call_destructor(struct afs_call *); |
| @@ -653,21 +629,14 @@ extern int afs_fs_init(void); | |||
| 653 | extern void afs_fs_exit(void); | 629 | extern void afs_fs_exit(void); |
| 654 | 630 | ||
| 655 | /* | 631 | /* |
| 656 | * use-rtnetlink.c | ||
| 657 | */ | ||
| 658 | extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool); | ||
| 659 | extern int afs_get_MAC_address(u8 *, size_t); | ||
| 660 | |||
| 661 | /* | ||
| 662 | * vlclient.c | 632 | * vlclient.c |
| 663 | */ | 633 | */ |
| 664 | extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *, | 634 | extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *, |
| 665 | const char *, struct afs_cache_vlocation *, | 635 | const char *, struct afs_cache_vlocation *, |
| 666 | const struct afs_wait_mode *); | 636 | bool); |
| 667 | extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *, | 637 | extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *, |
| 668 | afs_volid_t, afs_voltype_t, | 638 | afs_volid_t, afs_voltype_t, |
| 669 | struct afs_cache_vlocation *, | 639 | struct afs_cache_vlocation *, bool); |
| 670 | const struct afs_wait_mode *); | ||
| 671 | 640 | ||
| 672 | /* | 641 | /* |
| 673 | * vlocation.c | 642 | * vlocation.c |
| @@ -699,7 +668,7 @@ extern void afs_vnode_finalise_status_update(struct afs_vnode *, | |||
| 699 | extern int afs_vnode_fetch_status(struct afs_vnode *, struct afs_vnode *, | 668 | extern int afs_vnode_fetch_status(struct afs_vnode *, struct afs_vnode *, |
| 700 | struct key *); | 669 | struct key *); |
| 701 | extern int afs_vnode_fetch_data(struct afs_vnode *, struct key *, | 670 | extern int afs_vnode_fetch_data(struct afs_vnode *, struct key *, |
| 702 | off_t, size_t, struct page *); | 671 | struct afs_read *); |
| 703 | extern int afs_vnode_create(struct afs_vnode *, struct key *, const char *, | 672 | extern int afs_vnode_create(struct afs_vnode *, struct key *, const char *, |
| 704 | umode_t, struct afs_fid *, struct afs_file_status *, | 673 | umode_t, struct afs_fid *, struct afs_file_status *, |
| 705 | struct afs_callback *, struct afs_server **); | 674 | struct afs_callback *, struct afs_server **); |
| @@ -756,6 +725,8 @@ extern int afs_fsync(struct file *, loff_t, loff_t, int); | |||
| 756 | /* | 725 | /* |
| 757 | * debug tracing | 726 | * debug tracing |
| 758 | */ | 727 | */ |
| 728 | #include <trace/events/afs.h> | ||
| 729 | |||
| 759 | extern unsigned afs_debug; | 730 | extern unsigned afs_debug; |
| 760 | 731 | ||
| 761 | #define dbgprintk(FMT,...) \ | 732 | #define dbgprintk(FMT,...) \ |
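The internal.h changes above also introduce struct afs_read, a refcounted descriptor for an in-flight data fetch that carries its own flexible array of target pages, together with the afs_put_read() helper that drops it. A sketch of how such a descriptor is built for an nr_pages read, using only the fields declared above (the one-page case in the write.c hunk further down follows the same shape; the helper name here is purely illustrative):

    /* Illustrative helper: allocate an afs_read covering nr_pages pages
     * starting at byte position pos; the pages[] flexible array is part
     * of the same allocation. */
    static struct afs_read *afs_alloc_read_example(loff_t pos, loff_t len,
                                                   unsigned int nr_pages)
    {
            struct afs_read *req;

            req = kzalloc(sizeof(struct afs_read) +
                          sizeof(struct page *) * nr_pages, GFP_KERNEL);
            if (!req)
                    return NULL;

            atomic_set(&req->usage, 1);     /* dropped via afs_put_read() */
            req->pos = pos;
            req->len = len;
            req->nr_pages = nr_pages;
            /* caller fills req->pages[0..nr_pages-1] before passing the
             * descriptor to afs_vnode_fetch_data() / afs_fs_fetch_data() */
            return req;
    }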
diff --git a/fs/afs/main.c b/fs/afs/main.c index 0b187ef3b5b7..51d7d17bca57 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/completion.h> | 15 | #include <linux/completion.h> |
| 16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
| 17 | #include <linux/random.h> | 17 | #include <linux/random.h> |
| 18 | #define CREATE_TRACE_POINTS | ||
| 18 | #include "internal.h" | 19 | #include "internal.h" |
| 19 | 20 | ||
| 20 | MODULE_DESCRIPTION("AFS Client File System"); | 21 | MODULE_DESCRIPTION("AFS Client File System"); |
| @@ -30,53 +31,10 @@ static char *rootcell; | |||
| 30 | module_param(rootcell, charp, 0); | 31 | module_param(rootcell, charp, 0); |
| 31 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); | 32 | MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); |
| 32 | 33 | ||
| 33 | struct afs_uuid afs_uuid; | 34 | struct uuid_v1 afs_uuid; |
| 34 | struct workqueue_struct *afs_wq; | 35 | struct workqueue_struct *afs_wq; |
| 35 | 36 | ||
| 36 | /* | 37 | /* |
| 37 | * get a client UUID | ||
| 38 | */ | ||
| 39 | static int __init afs_get_client_UUID(void) | ||
| 40 | { | ||
| 41 | struct timespec ts; | ||
| 42 | u64 uuidtime; | ||
| 43 | u16 clockseq; | ||
| 44 | int ret; | ||
| 45 | |||
| 46 | /* read the MAC address of one of the external interfaces and construct | ||
| 47 | * a UUID from it */ | ||
| 48 | ret = afs_get_MAC_address(afs_uuid.node, sizeof(afs_uuid.node)); | ||
| 49 | if (ret < 0) | ||
| 50 | return ret; | ||
| 51 | |||
| 52 | getnstimeofday(&ts); | ||
| 53 | uuidtime = (u64) ts.tv_sec * 1000 * 1000 * 10; | ||
| 54 | uuidtime += ts.tv_nsec / 100; | ||
| 55 | uuidtime += AFS_UUID_TO_UNIX_TIME; | ||
| 56 | afs_uuid.time_low = uuidtime; | ||
| 57 | afs_uuid.time_mid = uuidtime >> 32; | ||
| 58 | afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK; | ||
| 59 | afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME; | ||
| 60 | |||
| 61 | get_random_bytes(&clockseq, 2); | ||
| 62 | afs_uuid.clock_seq_low = clockseq; | ||
| 63 | afs_uuid.clock_seq_hi_and_reserved = | ||
| 64 | (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK; | ||
| 65 | afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD; | ||
| 66 | |||
| 67 | _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", | ||
| 68 | afs_uuid.time_low, | ||
| 69 | afs_uuid.time_mid, | ||
| 70 | afs_uuid.time_hi_and_version, | ||
| 71 | afs_uuid.clock_seq_hi_and_reserved, | ||
| 72 | afs_uuid.clock_seq_low, | ||
| 73 | afs_uuid.node[0], afs_uuid.node[1], afs_uuid.node[2], | ||
| 74 | afs_uuid.node[3], afs_uuid.node[4], afs_uuid.node[5]); | ||
| 75 | |||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | |||
| 79 | /* | ||
| 80 | * initialise the AFS client FS module | 38 | * initialise the AFS client FS module |
| 81 | */ | 39 | */ |
| 82 | static int __init afs_init(void) | 40 | static int __init afs_init(void) |
| @@ -85,9 +43,7 @@ static int __init afs_init(void) | |||
| 85 | 43 | ||
| 86 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); | 44 | printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); |
| 87 | 45 | ||
| 88 | ret = afs_get_client_UUID(); | 46 | generate_random_uuid((unsigned char *)&afs_uuid); |
| 89 | if (ret < 0) | ||
| 90 | return ret; | ||
| 91 | 47 | ||
| 92 | /* create workqueue */ | 48 | /* create workqueue */ |
| 93 | ret = -ENOMEM; | 49 | ret = -ENOMEM; |
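The main.c hunk replaces the hand-rolled MAC-address-and-timestamp client UUID with a randomly generated one at module init. A minimal sketch of what the initialisation reduces to, assuming only the includes already present in main.c:

    struct uuid_v1 afs_uuid;

    static int __init afs_uuid_example_init(void)
    {
            /* random (v4-style) UUID; no interface MAC address is needed */
            generate_random_uuid((unsigned char *)&afs_uuid);
            return 0;
    }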
diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c index 7ad36506c256..40b2bab3e401 100644 --- a/fs/afs/netdevices.c +++ b/fs/afs/netdevices.c | |||
| @@ -12,27 +12,6 @@ | |||
| 12 | #include "internal.h" | 12 | #include "internal.h" |
| 13 | 13 | ||
| 14 | /* | 14 | /* |
| 15 | * get a MAC address from a random ethernet interface that has a real one | ||
| 16 | * - the buffer will normally be 6 bytes in size | ||
| 17 | */ | ||
| 18 | int afs_get_MAC_address(u8 *mac, size_t maclen) | ||
| 19 | { | ||
| 20 | struct net_device *dev; | ||
| 21 | int ret = -ENODEV; | ||
| 22 | |||
| 23 | BUG_ON(maclen != ETH_ALEN); | ||
| 24 | |||
| 25 | rtnl_lock(); | ||
| 26 | dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER); | ||
| 27 | if (dev) { | ||
| 28 | memcpy(mac, dev->dev_addr, maclen); | ||
| 29 | ret = 0; | ||
| 30 | } | ||
| 31 | rtnl_unlock(); | ||
| 32 | return ret; | ||
| 33 | } | ||
| 34 | |||
| 35 | /* | ||
| 36 | * get a list of this system's interface IPv4 addresses, netmasks and MTUs | 15 | * get a list of this system's interface IPv4 addresses, netmasks and MTUs |
| 37 | * - maxbufs must be at least 1 | 16 | * - maxbufs must be at least 1 |
| 38 | * - returns the number of interface records in the buffer | 17 | * - returns the number of interface records in the buffer |
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 25f05a8d21b1..95f42872b787 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -19,35 +19,16 @@ | |||
| 19 | struct socket *afs_socket; /* my RxRPC socket */ | 19 | struct socket *afs_socket; /* my RxRPC socket */ |
| 20 | static struct workqueue_struct *afs_async_calls; | 20 | static struct workqueue_struct *afs_async_calls; |
| 21 | static struct afs_call *afs_spare_incoming_call; | 21 | static struct afs_call *afs_spare_incoming_call; |
| 22 | static atomic_t afs_outstanding_calls; | 22 | atomic_t afs_outstanding_calls; |
| 23 | 23 | ||
| 24 | static void afs_free_call(struct afs_call *); | ||
| 25 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); | 24 | static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); |
| 26 | static int afs_wait_for_call_to_complete(struct afs_call *); | 25 | static int afs_wait_for_call_to_complete(struct afs_call *); |
| 27 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); | 26 | static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); |
| 28 | static int afs_dont_wait_for_call_to_complete(struct afs_call *); | ||
| 29 | static void afs_process_async_call(struct work_struct *); | 27 | static void afs_process_async_call(struct work_struct *); |
| 30 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); | 28 | static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); |
| 31 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); | 29 | static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); |
| 32 | static int afs_deliver_cm_op_id(struct afs_call *); | 30 | static int afs_deliver_cm_op_id(struct afs_call *); |
| 33 | 31 | ||
| 34 | /* synchronous call management */ | ||
| 35 | const struct afs_wait_mode afs_sync_call = { | ||
| 36 | .notify_rx = afs_wake_up_call_waiter, | ||
| 37 | .wait = afs_wait_for_call_to_complete, | ||
| 38 | }; | ||
| 39 | |||
| 40 | /* asynchronous call management */ | ||
| 41 | const struct afs_wait_mode afs_async_call = { | ||
| 42 | .notify_rx = afs_wake_up_async_call, | ||
| 43 | .wait = afs_dont_wait_for_call_to_complete, | ||
| 44 | }; | ||
| 45 | |||
| 46 | /* asynchronous incoming call management */ | ||
| 47 | static const struct afs_wait_mode afs_async_incoming_call = { | ||
| 48 | .notify_rx = afs_wake_up_async_call, | ||
| 49 | }; | ||
| 50 | |||
| 51 | /* asynchronous incoming call initial processing */ | 32 | /* asynchronous incoming call initial processing */ |
| 52 | static const struct afs_call_type afs_RXCMxxxx = { | 33 | static const struct afs_call_type afs_RXCMxxxx = { |
| 53 | .name = "CB.xxxx", | 34 | .name = "CB.xxxx", |
| @@ -130,9 +111,11 @@ void afs_close_socket(void) | |||
| 130 | { | 111 | { |
| 131 | _enter(""); | 112 | _enter(""); |
| 132 | 113 | ||
| 114 | kernel_listen(afs_socket, 0); | ||
| 115 | flush_workqueue(afs_async_calls); | ||
| 116 | |||
| 133 | if (afs_spare_incoming_call) { | 117 | if (afs_spare_incoming_call) { |
| 134 | atomic_inc(&afs_outstanding_calls); | 118 | afs_put_call(afs_spare_incoming_call); |
| 135 | afs_free_call(afs_spare_incoming_call); | ||
| 136 | afs_spare_incoming_call = NULL; | 119 | afs_spare_incoming_call = NULL; |
| 137 | } | 120 | } |
| 138 | 121 | ||
| @@ -141,7 +124,6 @@ void afs_close_socket(void) | |||
| 141 | TASK_UNINTERRUPTIBLE); | 124 | TASK_UNINTERRUPTIBLE); |
| 142 | _debug("no outstanding calls"); | 125 | _debug("no outstanding calls"); |
| 143 | 126 | ||
| 144 | flush_workqueue(afs_async_calls); | ||
| 145 | kernel_sock_shutdown(afs_socket, SHUT_RDWR); | 127 | kernel_sock_shutdown(afs_socket, SHUT_RDWR); |
| 146 | flush_workqueue(afs_async_calls); | 128 | flush_workqueue(afs_async_calls); |
| 147 | sock_release(afs_socket); | 129 | sock_release(afs_socket); |
| @@ -152,44 +134,79 @@ void afs_close_socket(void) | |||
| 152 | } | 134 | } |
| 153 | 135 | ||
| 154 | /* | 136 | /* |
| 155 | * free a call | 137 | * Allocate a call. |
| 156 | */ | 138 | */ |
| 157 | static void afs_free_call(struct afs_call *call) | 139 | static struct afs_call *afs_alloc_call(const struct afs_call_type *type, |
| 140 | gfp_t gfp) | ||
| 158 | { | 141 | { |
| 159 | _debug("DONE %p{%s} [%d]", | 142 | struct afs_call *call; |
| 160 | call, call->type->name, atomic_read(&afs_outstanding_calls)); | 143 | int o; |
| 161 | 144 | ||
| 162 | ASSERTCMP(call->rxcall, ==, NULL); | 145 | call = kzalloc(sizeof(*call), gfp); |
| 163 | ASSERT(!work_pending(&call->async_work)); | 146 | if (!call) |
| 164 | ASSERT(call->type->name != NULL); | 147 | return NULL; |
| 165 | 148 | ||
| 166 | kfree(call->request); | 149 | call->type = type; |
| 167 | kfree(call); | 150 | atomic_set(&call->usage, 1); |
| 151 | INIT_WORK(&call->async_work, afs_process_async_call); | ||
| 152 | init_waitqueue_head(&call->waitq); | ||
| 168 | 153 | ||
| 169 | if (atomic_dec_and_test(&afs_outstanding_calls)) | 154 | o = atomic_inc_return(&afs_outstanding_calls); |
| 170 | wake_up_atomic_t(&afs_outstanding_calls); | 155 | trace_afs_call(call, afs_call_trace_alloc, 1, o, |
| 156 | __builtin_return_address(0)); | ||
| 157 | return call; | ||
| 171 | } | 158 | } |
| 172 | 159 | ||
| 173 | /* | 160 | /* |
| 174 | * End a call but do not free it | 161 | * Dispose of a reference on a call. |
| 175 | */ | 162 | */ |
| 176 | static void afs_end_call_nofree(struct afs_call *call) | 163 | void afs_put_call(struct afs_call *call) |
| 177 | { | 164 | { |
| 178 | if (call->rxcall) { | 165 | int n = atomic_dec_return(&call->usage); |
| 179 | rxrpc_kernel_end_call(afs_socket, call->rxcall); | 166 | int o = atomic_read(&afs_outstanding_calls); |
| 180 | call->rxcall = NULL; | 167 | |
| 168 | trace_afs_call(call, afs_call_trace_put, n + 1, o, | ||
| 169 | __builtin_return_address(0)); | ||
| 170 | |||
| 171 | ASSERTCMP(n, >=, 0); | ||
| 172 | if (n == 0) { | ||
| 173 | ASSERT(!work_pending(&call->async_work)); | ||
| 174 | ASSERT(call->type->name != NULL); | ||
| 175 | |||
| 176 | if (call->rxcall) { | ||
| 177 | rxrpc_kernel_end_call(afs_socket, call->rxcall); | ||
| 178 | call->rxcall = NULL; | ||
| 179 | } | ||
| 180 | if (call->type->destructor) | ||
| 181 | call->type->destructor(call); | ||
| 182 | |||
| 183 | kfree(call->request); | ||
| 184 | kfree(call); | ||
| 185 | |||
| 186 | o = atomic_dec_return(&afs_outstanding_calls); | ||
| 187 | trace_afs_call(call, afs_call_trace_free, 0, o, | ||
| 188 | __builtin_return_address(0)); | ||
| 189 | if (o == 0) | ||
| 190 | wake_up_atomic_t(&afs_outstanding_calls); | ||
| 181 | } | 191 | } |
| 182 | if (call->type->destructor) | ||
| 183 | call->type->destructor(call); | ||
| 184 | } | 192 | } |
| 185 | 193 | ||
| 186 | /* | 194 | /* |
| 187 | * End a call and free it | 195 | * Queue the call for actual work. Returns 0 unconditionally for convenience. |
| 188 | */ | 196 | */ |
| 189 | static void afs_end_call(struct afs_call *call) | 197 | int afs_queue_call_work(struct afs_call *call) |
| 190 | { | 198 | { |
| 191 | afs_end_call_nofree(call); | 199 | int u = atomic_inc_return(&call->usage); |
| 192 | afs_free_call(call); | 200 | |
| 201 | trace_afs_call(call, afs_call_trace_work, u, | ||
| 202 | atomic_read(&afs_outstanding_calls), | ||
| 203 | __builtin_return_address(0)); | ||
| 204 | |||
| 205 | INIT_WORK(&call->work, call->type->work); | ||
| 206 | |||
| 207 | if (!queue_work(afs_wq, &call->work)) | ||
| 208 | afs_put_call(call); | ||
| 209 | return 0; | ||
| 193 | } | 210 | } |
| 194 | 211 | ||
| 195 | /* | 212 | /* |
| @@ -200,25 +217,19 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, | |||
| 200 | { | 217 | { |
| 201 | struct afs_call *call; | 218 | struct afs_call *call; |
| 202 | 219 | ||
| 203 | call = kzalloc(sizeof(*call), GFP_NOFS); | 220 | call = afs_alloc_call(type, GFP_NOFS); |
| 204 | if (!call) | 221 | if (!call) |
| 205 | goto nomem_call; | 222 | goto nomem_call; |
| 206 | 223 | ||
| 207 | _debug("CALL %p{%s} [%d]", | ||
| 208 | call, type->name, atomic_read(&afs_outstanding_calls)); | ||
| 209 | atomic_inc(&afs_outstanding_calls); | ||
| 210 | |||
| 211 | call->type = type; | ||
| 212 | call->request_size = request_size; | ||
| 213 | call->reply_max = reply_max; | ||
| 214 | |||
| 215 | if (request_size) { | 224 | if (request_size) { |
| 225 | call->request_size = request_size; | ||
| 216 | call->request = kmalloc(request_size, GFP_NOFS); | 226 | call->request = kmalloc(request_size, GFP_NOFS); |
| 217 | if (!call->request) | 227 | if (!call->request) |
| 218 | goto nomem_free; | 228 | goto nomem_free; |
| 219 | } | 229 | } |
| 220 | 230 | ||
| 221 | if (reply_max) { | 231 | if (reply_max) { |
| 232 | call->reply_max = reply_max; | ||
| 222 | call->buffer = kmalloc(reply_max, GFP_NOFS); | 233 | call->buffer = kmalloc(reply_max, GFP_NOFS); |
| 223 | if (!call->buffer) | 234 | if (!call->buffer) |
| 224 | goto nomem_free; | 235 | goto nomem_free; |
| @@ -228,7 +239,7 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, | |||
| 228 | return call; | 239 | return call; |
| 229 | 240 | ||
| 230 | nomem_free: | 241 | nomem_free: |
| 231 | afs_free_call(call); | 242 | afs_put_call(call); |
| 232 | nomem_call: | 243 | nomem_call: |
| 233 | return NULL; | 244 | return NULL; |
| 234 | } | 245 | } |
| @@ -315,7 +326,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, | |||
| 315 | * initiate a call | 326 | * initiate a call |
| 316 | */ | 327 | */ |
| 317 | int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | 328 | int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, |
| 318 | const struct afs_wait_mode *wait_mode) | 329 | bool async) |
| 319 | { | 330 | { |
| 320 | struct sockaddr_rxrpc srx; | 331 | struct sockaddr_rxrpc srx; |
| 321 | struct rxrpc_call *rxcall; | 332 | struct rxrpc_call *rxcall; |
| @@ -332,8 +343,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | |||
| 332 | call, call->type->name, key_serial(call->key), | 343 | call, call->type->name, key_serial(call->key), |
| 333 | atomic_read(&afs_outstanding_calls)); | 344 | atomic_read(&afs_outstanding_calls)); |
| 334 | 345 | ||
| 335 | call->wait_mode = wait_mode; | 346 | call->async = async; |
| 336 | INIT_WORK(&call->async_work, afs_process_async_call); | ||
| 337 | 347 | ||
| 338 | memset(&srx, 0, sizeof(srx)); | 348 | memset(&srx, 0, sizeof(srx)); |
| 339 | srx.srx_family = AF_RXRPC; | 349 | srx.srx_family = AF_RXRPC; |
| @@ -347,7 +357,9 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | |||
| 347 | /* create a call */ | 357 | /* create a call */ |
| 348 | rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key, | 358 | rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key, |
| 349 | (unsigned long) call, gfp, | 359 | (unsigned long) call, gfp, |
| 350 | wait_mode->notify_rx); | 360 | (async ? |
| 361 | afs_wake_up_async_call : | ||
| 362 | afs_wake_up_call_waiter)); | ||
| 351 | call->key = NULL; | 363 | call->key = NULL; |
| 352 | if (IS_ERR(rxcall)) { | 364 | if (IS_ERR(rxcall)) { |
| 353 | ret = PTR_ERR(rxcall); | 365 | ret = PTR_ERR(rxcall); |
| @@ -386,12 +398,15 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | |||
| 386 | 398 | ||
| 387 | /* at this point, an async call may no longer exist as it may have | 399 | /* at this point, an async call may no longer exist as it may have |
| 388 | * already completed */ | 400 | * already completed */ |
| 389 | return wait_mode->wait(call); | 401 | if (call->async) |
| 402 | return -EINPROGRESS; | ||
| 403 | |||
| 404 | return afs_wait_for_call_to_complete(call); | ||
| 390 | 405 | ||
| 391 | error_do_abort: | 406 | error_do_abort: |
| 392 | rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); | 407 | rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); |
| 393 | error_kill_call: | 408 | error_kill_call: |
| 394 | afs_end_call(call); | 409 | afs_put_call(call); |
| 395 | _leave(" = %d", ret); | 410 | _leave(" = %d", ret); |
| 396 | return ret; | 411 | return ret; |
| 397 | } | 412 | } |
| @@ -416,6 +431,8 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
| 416 | ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall, | 431 | ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall, |
| 417 | NULL, 0, &offset, false, | 432 | NULL, 0, &offset, false, |
| 418 | &call->abort_code); | 433 | &call->abort_code); |
| 434 | trace_afs_recv_data(call, 0, offset, false, ret); | ||
| 435 | |||
| 419 | if (ret == -EINPROGRESS || ret == -EAGAIN) | 436 | if (ret == -EINPROGRESS || ret == -EAGAIN) |
| 420 | return; | 437 | return; |
| 421 | if (ret == 1 || ret < 0) { | 438 | if (ret == 1 || ret < 0) { |
| @@ -459,7 +476,7 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
| 459 | 476 | ||
| 460 | done: | 477 | done: |
| 461 | if (call->state == AFS_CALL_COMPLETE && call->incoming) | 478 | if (call->state == AFS_CALL_COMPLETE && call->incoming) |
| 462 | afs_end_call(call); | 479 | afs_put_call(call); |
| 463 | out: | 480 | out: |
| 464 | _leave(""); | 481 | _leave(""); |
| 465 | return; | 482 | return; |
| @@ -516,7 +533,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) | |||
| 516 | } | 533 | } |
| 517 | 534 | ||
| 518 | _debug("call complete"); | 535 | _debug("call complete"); |
| 519 | afs_end_call(call); | 536 | afs_put_call(call); |
| 520 | _leave(" = %d", ret); | 537 | _leave(" = %d", ret); |
| 521 | return ret; | 538 | return ret; |
| 522 | } | 539 | } |
| @@ -540,24 +557,25 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall, | |||
| 540 | unsigned long call_user_ID) | 557 | unsigned long call_user_ID) |
| 541 | { | 558 | { |
| 542 | struct afs_call *call = (struct afs_call *)call_user_ID; | 559 | struct afs_call *call = (struct afs_call *)call_user_ID; |
| 560 | int u; | ||
| 543 | 561 | ||
| 562 | trace_afs_notify_call(rxcall, call); | ||
| 544 | call->need_attention = true; | 563 | call->need_attention = true; |
| 545 | queue_work(afs_async_calls, &call->async_work); | ||
| 546 | } | ||
| 547 | 564 | ||
| 548 | /* | 565 | u = __atomic_add_unless(&call->usage, 1, 0); |
| 549 | * put a call into asynchronous mode | 566 | if (u != 0) { |
| 550 | * - mustn't touch the call descriptor as the call my have completed by the | 567 | trace_afs_call(call, afs_call_trace_wake, u, |
| 551 | * time we get here | 568 | atomic_read(&afs_outstanding_calls), |
| 552 | */ | 569 | __builtin_return_address(0)); |
| 553 | static int afs_dont_wait_for_call_to_complete(struct afs_call *call) | 570 | |
| 554 | { | 571 | if (!queue_work(afs_async_calls, &call->async_work)) |
| 555 | _enter(""); | 572 | afs_put_call(call); |
| 556 | return -EINPROGRESS; | 573 | } |
| 557 | } | 574 | } |
| 558 | 575 | ||
| 559 | /* | 576 | /* |
| 560 | * delete an asynchronous call | 577 | * Delete an asynchronous call. The work item carries a ref to the call struct |
| 578 | * that we need to release. | ||
| 561 | */ | 579 | */ |
| 562 | static void afs_delete_async_call(struct work_struct *work) | 580 | static void afs_delete_async_call(struct work_struct *work) |
| 563 | { | 581 | { |
| @@ -565,13 +583,14 @@ static void afs_delete_async_call(struct work_struct *work) | |||
| 565 | 583 | ||
| 566 | _enter(""); | 584 | _enter(""); |
| 567 | 585 | ||
| 568 | afs_free_call(call); | 586 | afs_put_call(call); |
| 569 | 587 | ||
| 570 | _leave(""); | 588 | _leave(""); |
| 571 | } | 589 | } |
| 572 | 590 | ||
| 573 | /* | 591 | /* |
| 574 | * perform processing on an asynchronous call | 592 | * Perform I/O processing on an asynchronous call. The work item carries a ref |
| 593 | * to the call struct that we either need to release or to pass on. | ||
| 575 | */ | 594 | */ |
| 576 | static void afs_process_async_call(struct work_struct *work) | 595 | static void afs_process_async_call(struct work_struct *work) |
| 577 | { | 596 | { |
| @@ -584,21 +603,19 @@ static void afs_process_async_call(struct work_struct *work) | |||
| 584 | afs_deliver_to_call(call); | 603 | afs_deliver_to_call(call); |
| 585 | } | 604 | } |
| 586 | 605 | ||
| 587 | if (call->state == AFS_CALL_COMPLETE && call->wait_mode) { | 606 | if (call->state == AFS_CALL_COMPLETE) { |
| 588 | if (call->wait_mode->async_complete) | ||
| 589 | call->wait_mode->async_complete(call->reply, | ||
| 590 | call->error); | ||
| 591 | call->reply = NULL; | 607 | call->reply = NULL; |
| 592 | 608 | ||
| 593 | /* kill the call */ | 609 | /* We have two refs to release - one from the alloc and one |
| 594 | afs_end_call_nofree(call); | 610 | * queued with the work item - and we can't just deallocate the |
| 595 | 611 | * call because the work item may be queued again. | |
| 596 | /* we can't just delete the call because the work item may be | 612 | */ |
| 597 | * queued */ | ||
| 598 | call->async_work.func = afs_delete_async_call; | 613 | call->async_work.func = afs_delete_async_call; |
| 599 | queue_work(afs_async_calls, &call->async_work); | 614 | if (!queue_work(afs_async_calls, &call->async_work)) |
| 615 | afs_put_call(call); | ||
| 600 | } | 616 | } |
| 601 | 617 | ||
| 618 | afs_put_call(call); | ||
| 602 | _leave(""); | 619 | _leave(""); |
| 603 | } | 620 | } |
| 604 | 621 | ||
| @@ -618,15 +635,13 @@ static void afs_charge_preallocation(struct work_struct *work) | |||
| 618 | 635 | ||
| 619 | for (;;) { | 636 | for (;;) { |
| 620 | if (!call) { | 637 | if (!call) { |
| 621 | call = kzalloc(sizeof(struct afs_call), GFP_KERNEL); | 638 | call = afs_alloc_call(&afs_RXCMxxxx, GFP_KERNEL); |
| 622 | if (!call) | 639 | if (!call) |
| 623 | break; | 640 | break; |
| 624 | 641 | ||
| 625 | INIT_WORK(&call->async_work, afs_process_async_call); | 642 | call->async = true; |
| 626 | call->wait_mode = &afs_async_incoming_call; | ||
| 627 | call->type = &afs_RXCMxxxx; | ||
| 628 | init_waitqueue_head(&call->waitq); | ||
| 629 | call->state = AFS_CALL_AWAIT_OP_ID; | 643 | call->state = AFS_CALL_AWAIT_OP_ID; |
| 644 | init_waitqueue_head(&call->waitq); | ||
| 630 | } | 645 | } |
| 631 | 646 | ||
| 632 | if (rxrpc_kernel_charge_accept(afs_socket, | 647 | if (rxrpc_kernel_charge_accept(afs_socket, |
| @@ -648,9 +663,8 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall, | |||
| 648 | { | 663 | { |
| 649 | struct afs_call *call = (struct afs_call *)user_call_ID; | 664 | struct afs_call *call = (struct afs_call *)user_call_ID; |
| 650 | 665 | ||
| 651 | atomic_inc(&afs_outstanding_calls); | ||
| 652 | call->rxcall = NULL; | 666 | call->rxcall = NULL; |
| 653 | afs_free_call(call); | 667 | afs_put_call(call); |
| 654 | } | 668 | } |
| 655 | 669 | ||
| 656 | /* | 670 | /* |
| @@ -659,7 +673,6 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall, | |||
| 659 | static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall, | 673 | static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall, |
| 660 | unsigned long user_call_ID) | 674 | unsigned long user_call_ID) |
| 661 | { | 675 | { |
| 662 | atomic_inc(&afs_outstanding_calls); | ||
| 663 | queue_work(afs_wq, &afs_charge_preallocation_work); | 676 | queue_work(afs_wq, &afs_charge_preallocation_work); |
| 664 | } | 677 | } |
| 665 | 678 | ||
| @@ -689,6 +702,8 @@ static int afs_deliver_cm_op_id(struct afs_call *call) | |||
| 689 | if (!afs_cm_incoming_call(call)) | 702 | if (!afs_cm_incoming_call(call)) |
| 690 | return -ENOTSUPP; | 703 | return -ENOTSUPP; |
| 691 | 704 | ||
| 705 | trace_afs_cb_call(call); | ||
| 706 | |||
| 692 | /* pass responsibility for the remainder of this message off to the | 707 |
| 693 | * cache manager op */ | 708 | * cache manager op */ |
| 694 | return call->type->deliver(call); | 709 | return call->type->deliver(call); |
| @@ -721,7 +736,6 @@ void afs_send_empty_reply(struct afs_call *call) | |||
| 721 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, | 736 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, |
| 722 | RX_USER_ABORT, ENOMEM, "KOO"); | 737 | RX_USER_ABORT, ENOMEM, "KOO"); |
| 723 | default: | 738 | default: |
| 724 | afs_end_call(call); | ||
| 725 | _leave(" [error]"); | 739 | _leave(" [error]"); |
| 726 | return; | 740 | return; |
| 727 | } | 741 | } |
| @@ -760,7 +774,6 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) | |||
| 760 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, | 774 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, |
| 761 | RX_USER_ABORT, ENOMEM, "KOO"); | 775 | RX_USER_ABORT, ENOMEM, "KOO"); |
| 762 | } | 776 | } |
| 763 | afs_end_call(call); | ||
| 764 | _leave(" [error]"); | 777 | _leave(" [error]"); |
| 765 | } | 778 | } |
| 766 | 779 | ||
| @@ -780,6 +793,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, | |||
| 780 | ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall, | 793 | ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall, |
| 781 | buf, count, &call->offset, | 794 | buf, count, &call->offset, |
| 782 | want_more, &call->abort_code); | 795 | want_more, &call->abort_code); |
| 796 | trace_afs_recv_data(call, count, call->offset, want_more, ret); | ||
| 783 | if (ret == 0 || ret == -EAGAIN) | 797 | if (ret == 0 || ret == -EAGAIN) |
| 784 | return ret; | 798 | return ret; |
| 785 | 799 | ||
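The rxrpc.c rework above replaces the free/end-call helpers with a usage count on struct afs_call: afs_alloc_call() starts it at one, each successful queue_work() hand-off owns a reference, and afs_put_call() tears the call down on the final put. The subtle case is the wake-up path, which can race with that final put; the sketch below isolates the "take a reference only if the call is still live" step (the helper name is illustrative; __atomic_add_unless(&x, 1, 0) is the open-coded form of atomic_inc_not_zero(&x)):

    /* Illustrative only: grab a reference for deferred processing unless
     * the usage count has already reached zero, in which case the call
     * is being freed and must not be touched. */
    static bool afs_example_get_live_call(struct afs_call *call)
    {
            if (__atomic_add_unless(&call->usage, 1, 0) == 0)
                    return false;   /* lost the race with afs_put_call() */

            /* The caller now owns a reference.  If it then fails to queue
             * the work item (because it was already queued), it must drop
             * that reference itself:
             *
             *      if (!queue_work(afs_async_calls, &call->async_work))
             *              afs_put_call(call);
             */
            return true;
    }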
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 94bcd97d22b8..a5e4cc561b6c 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c | |||
| @@ -147,7 +147,7 @@ int afs_vl_get_entry_by_name(struct in_addr *addr, | |||
| 147 | struct key *key, | 147 | struct key *key, |
| 148 | const char *volname, | 148 | const char *volname, |
| 149 | struct afs_cache_vlocation *entry, | 149 | struct afs_cache_vlocation *entry, |
| 150 | const struct afs_wait_mode *wait_mode) | 150 | bool async) |
| 151 | { | 151 | { |
| 152 | struct afs_call *call; | 152 | struct afs_call *call; |
| 153 | size_t volnamesz, reqsz, padsz; | 153 | size_t volnamesz, reqsz, padsz; |
| @@ -177,7 +177,7 @@ int afs_vl_get_entry_by_name(struct in_addr *addr, | |||
| 177 | memset((void *) bp + volnamesz, 0, padsz); | 177 | memset((void *) bp + volnamesz, 0, padsz); |
| 178 | 178 | ||
| 179 | /* initiate the call */ | 179 | /* initiate the call */ |
| 180 | return afs_make_call(addr, call, GFP_KERNEL, wait_mode); | 180 | return afs_make_call(addr, call, GFP_KERNEL, async); |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | /* | 183 | /* |
| @@ -188,7 +188,7 @@ int afs_vl_get_entry_by_id(struct in_addr *addr, | |||
| 188 | afs_volid_t volid, | 188 | afs_volid_t volid, |
| 189 | afs_voltype_t voltype, | 189 | afs_voltype_t voltype, |
| 190 | struct afs_cache_vlocation *entry, | 190 | struct afs_cache_vlocation *entry, |
| 191 | const struct afs_wait_mode *wait_mode) | 191 | bool async) |
| 192 | { | 192 | { |
| 193 | struct afs_call *call; | 193 | struct afs_call *call; |
| 194 | __be32 *bp; | 194 | __be32 *bp; |
| @@ -211,5 +211,5 @@ int afs_vl_get_entry_by_id(struct in_addr *addr, | |||
| 211 | *bp = htonl(voltype); | 211 | *bp = htonl(voltype); |
| 212 | 212 | ||
| 213 | /* initiate the call */ | 213 | /* initiate the call */ |
| 214 | return afs_make_call(addr, call, GFP_KERNEL, wait_mode); | 214 | return afs_make_call(addr, call, GFP_KERNEL, async); |
| 215 | } | 215 | } |
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index 45a86396fd2d..d7d8dd8c0b31 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c | |||
| @@ -53,7 +53,7 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl, | |||
| 53 | 53 | ||
| 54 | /* attempt to access the VL server */ | 54 | /* attempt to access the VL server */ |
| 55 | ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb, | 55 | ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb, |
| 56 | &afs_sync_call); | 56 | false); |
| 57 | switch (ret) { | 57 | switch (ret) { |
| 58 | case 0: | 58 | case 0: |
| 59 | goto out; | 59 | goto out; |
| @@ -111,7 +111,7 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl, | |||
| 111 | 111 | ||
| 112 | /* attempt to access the VL server */ | 112 | /* attempt to access the VL server */ |
| 113 | ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb, | 113 | ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb, |
| 114 | &afs_sync_call); | 114 | false); |
| 115 | switch (ret) { | 115 | switch (ret) { |
| 116 | case 0: | 116 | case 0: |
| 117 | goto out; | 117 | goto out; |
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c index 25cf4c3f4ff7..dcb956143c86 100644 --- a/fs/afs/vnode.c +++ b/fs/afs/vnode.c | |||
| @@ -358,7 +358,7 @@ get_anyway: | |||
| 358 | server, ntohl(server->addr.s_addr)); | 358 | server, ntohl(server->addr.s_addr)); |
| 359 | 359 | ||
| 360 | ret = afs_fs_fetch_file_status(server, key, vnode, NULL, | 360 | ret = afs_fs_fetch_file_status(server, key, vnode, NULL, |
| 361 | &afs_sync_call); | 361 | false); |
| 362 | 362 | ||
| 363 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 363 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 364 | 364 | ||
| @@ -393,7 +393,7 @@ no_server: | |||
| 393 | * - TODO implement caching | 393 | * - TODO implement caching |
| 394 | */ | 394 | */ |
| 395 | int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key, | 395 | int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key, |
| 396 | off_t offset, size_t length, struct page *page) | 396 | struct afs_read *desc) |
| 397 | { | 397 | { |
| 398 | struct afs_server *server; | 398 | struct afs_server *server; |
| 399 | int ret; | 399 | int ret; |
| @@ -420,8 +420,8 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key, | |||
| 420 | 420 | ||
| 421 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 421 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 422 | 422 | ||
| 423 | ret = afs_fs_fetch_data(server, key, vnode, offset, length, | 423 | ret = afs_fs_fetch_data(server, key, vnode, desc, |
| 424 | page, &afs_sync_call); | 424 | false); |
| 425 | 425 | ||
| 426 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 426 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 427 | 427 | ||
| @@ -477,7 +477,7 @@ int afs_vnode_create(struct afs_vnode *vnode, struct key *key, | |||
| 477 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 477 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 478 | 478 | ||
| 479 | ret = afs_fs_create(server, key, vnode, name, mode, newfid, | 479 | ret = afs_fs_create(server, key, vnode, name, mode, newfid, |
| 480 | newstatus, newcb, &afs_sync_call); | 480 | newstatus, newcb, false); |
| 481 | 481 | ||
| 482 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 482 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 483 | 483 | ||
| @@ -533,7 +533,7 @@ int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name, | |||
| 533 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 533 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 534 | 534 | ||
| 535 | ret = afs_fs_remove(server, key, vnode, name, isdir, | 535 | ret = afs_fs_remove(server, key, vnode, name, isdir, |
| 536 | &afs_sync_call); | 536 | false); |
| 537 | 537 | ||
| 538 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 538 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 539 | 539 | ||
| @@ -595,7 +595,7 @@ int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode, | |||
| 595 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 595 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 596 | 596 | ||
| 597 | ret = afs_fs_link(server, key, dvnode, vnode, name, | 597 | ret = afs_fs_link(server, key, dvnode, vnode, name, |
| 598 | &afs_sync_call); | 598 | false); |
| 599 | 599 | ||
| 600 | } while (!afs_volume_release_fileserver(dvnode, server, ret)); | 600 | } while (!afs_volume_release_fileserver(dvnode, server, ret)); |
| 601 | 601 | ||
| @@ -659,7 +659,7 @@ int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key, | |||
| 659 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 659 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 660 | 660 | ||
| 661 | ret = afs_fs_symlink(server, key, vnode, name, content, | 661 | ret = afs_fs_symlink(server, key, vnode, name, content, |
| 662 | newfid, newstatus, &afs_sync_call); | 662 | newfid, newstatus, false); |
| 663 | 663 | ||
| 664 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 664 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 665 | 665 | ||
| @@ -729,7 +729,7 @@ int afs_vnode_rename(struct afs_vnode *orig_dvnode, | |||
| 729 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 729 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 730 | 730 | ||
| 731 | ret = afs_fs_rename(server, key, orig_dvnode, orig_name, | 731 | ret = afs_fs_rename(server, key, orig_dvnode, orig_name, |
| 732 | new_dvnode, new_name, &afs_sync_call); | 732 | new_dvnode, new_name, false); |
| 733 | 733 | ||
| 734 | } while (!afs_volume_release_fileserver(orig_dvnode, server, ret)); | 734 | } while (!afs_volume_release_fileserver(orig_dvnode, server, ret)); |
| 735 | 735 | ||
| @@ -795,7 +795,7 @@ int afs_vnode_store_data(struct afs_writeback *wb, pgoff_t first, pgoff_t last, | |||
| 795 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 795 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 796 | 796 | ||
| 797 | ret = afs_fs_store_data(server, wb, first, last, offset, to, | 797 | ret = afs_fs_store_data(server, wb, first, last, offset, to, |
| 798 | &afs_sync_call); | 798 | false); |
| 799 | 799 | ||
| 800 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 800 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 801 | 801 | ||
| @@ -847,7 +847,7 @@ int afs_vnode_setattr(struct afs_vnode *vnode, struct key *key, | |||
| 847 | 847 | ||
| 848 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 848 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 849 | 849 | ||
| 850 | ret = afs_fs_setattr(server, key, vnode, attr, &afs_sync_call); | 850 | ret = afs_fs_setattr(server, key, vnode, attr, false); |
| 851 | 851 | ||
| 852 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 852 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 853 | 853 | ||
| @@ -894,7 +894,7 @@ int afs_vnode_get_volume_status(struct afs_vnode *vnode, struct key *key, | |||
| 894 | 894 | ||
| 895 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 895 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 896 | 896 | ||
| 897 | ret = afs_fs_get_volume_status(server, key, vnode, vs, &afs_sync_call); | 897 | ret = afs_fs_get_volume_status(server, key, vnode, vs, false); |
| 898 | 898 | ||
| 899 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 899 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 900 | 900 | ||
| @@ -933,7 +933,7 @@ int afs_vnode_set_lock(struct afs_vnode *vnode, struct key *key, | |||
| 933 | 933 | ||
| 934 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 934 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 935 | 935 | ||
| 936 | ret = afs_fs_set_lock(server, key, vnode, type, &afs_sync_call); | 936 | ret = afs_fs_set_lock(server, key, vnode, type, false); |
| 937 | 937 | ||
| 938 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 938 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 939 | 939 | ||
| @@ -971,7 +971,7 @@ int afs_vnode_extend_lock(struct afs_vnode *vnode, struct key *key) | |||
| 971 | 971 | ||
| 972 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 972 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 973 | 973 | ||
| 974 | ret = afs_fs_extend_lock(server, key, vnode, &afs_sync_call); | 974 | ret = afs_fs_extend_lock(server, key, vnode, false); |
| 975 | 975 | ||
| 976 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 976 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 977 | 977 | ||
| @@ -1009,7 +1009,7 @@ int afs_vnode_release_lock(struct afs_vnode *vnode, struct key *key) | |||
| 1009 | 1009 | ||
| 1010 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); | 1010 | _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); |
| 1011 | 1011 | ||
| 1012 | ret = afs_fs_release_lock(server, key, vnode, &afs_sync_call); | 1012 | ret = afs_fs_release_lock(server, key, vnode, false); |
| 1013 | 1013 | ||
| 1014 | } while (!afs_volume_release_fileserver(vnode, server, ret)); | 1014 | } while (!afs_volume_release_fileserver(vnode, server, ret)); |
| 1015 | 1015 | ||
diff --git a/fs/afs/volume.c b/fs/afs/volume.c index d142a2449e65..546f9d01710b 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c | |||
| @@ -106,6 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params) | |||
| 106 | volume->cell = params->cell; | 106 | volume->cell = params->cell; |
| 107 | volume->vid = vlocation->vldb.vid[params->type]; | 107 | volume->vid = vlocation->vldb.vid[params->type]; |
| 108 | 108 | ||
| 109 | volume->bdi.ra_pages = VM_MAX_READAHEAD*1024/PAGE_SIZE; | ||
| 109 | ret = bdi_setup_and_register(&volume->bdi, "afs"); | 110 | ret = bdi_setup_and_register(&volume->bdi, "afs"); |
| 110 | if (ret) | 111 | if (ret) |
| 111 | goto error_bdi; | 112 | goto error_bdi; |
diff --git a/fs/afs/write.c b/fs/afs/write.c index f865c3f05bea..c83c1a0e851f 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
| @@ -86,19 +86,30 @@ void afs_put_writeback(struct afs_writeback *wb) | |||
| 86 | static int afs_fill_page(struct afs_vnode *vnode, struct key *key, | 86 | static int afs_fill_page(struct afs_vnode *vnode, struct key *key, |
| 87 | loff_t pos, struct page *page) | 87 | loff_t pos, struct page *page) |
| 88 | { | 88 | { |
| 89 | struct afs_read *req; | ||
| 89 | loff_t i_size; | 90 | loff_t i_size; |
| 90 | int ret; | 91 | int ret; |
| 91 | int len; | ||
| 92 | 92 | ||
| 93 | _enter(",,%llu", (unsigned long long)pos); | 93 | _enter(",,%llu", (unsigned long long)pos); |
| 94 | 94 | ||
| 95 | req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *), | ||
| 96 | GFP_KERNEL); | ||
| 97 | if (!req) | ||
| 98 | return -ENOMEM; | ||
| 99 | |||
| 100 | atomic_set(&req->usage, 1); | ||
| 101 | req->pos = pos; | ||
| 102 | req->nr_pages = 1; | ||
| 103 | req->pages[0] = page; | ||
| 104 | |||
| 95 | i_size = i_size_read(&vnode->vfs_inode); | 105 | i_size = i_size_read(&vnode->vfs_inode); |
| 96 | if (pos + PAGE_SIZE > i_size) | 106 | if (pos + PAGE_SIZE > i_size) |
| 97 | len = i_size - pos; | 107 | req->len = i_size - pos; |
| 98 | else | 108 | else |
| 99 | len = PAGE_SIZE; | 109 | req->len = PAGE_SIZE; |
| 100 | 110 | ||
| 101 | ret = afs_vnode_fetch_data(vnode, key, pos, len, page); | 111 | ret = afs_vnode_fetch_data(vnode, key, req); |
| 112 | afs_put_read(req); | ||
| 102 | if (ret < 0) { | 113 | if (ret < 0) { |
| 103 | if (ret == -ENOENT) { | 114 | if (ret == -ENOENT) { |
| 104 | _debug("got NOENT from server" | 115 | _debug("got NOENT from server" |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
| @@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2) | |||
| 1085 | * Tell lockdep we inherited freeze protection from submission | 1085 | * Tell lockdep we inherited freeze protection from submission |
| 1086 | * thread. | 1086 | * thread. |
| 1087 | */ | 1087 | */ |
| 1088 | __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); | 1088 | if (S_ISREG(file_inode(file)->i_mode)) |
| 1089 | __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); | ||
| 1089 | file_end_write(file); | 1090 | file_end_write(file); |
| 1090 | } | 1091 | } |
| 1091 | 1092 | ||
| @@ -1525,7 +1526,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, | |||
| 1525 | * by telling it the lock got released so that it doesn't | 1526 | * by telling it the lock got released so that it doesn't |
| 1526 | * complain about held lock when we return to userspace. | 1527 | * complain about held lock when we return to userspace. |
| 1527 | */ | 1528 | */ |
| 1528 | __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); | 1529 | if (S_ISREG(file_inode(file)->i_mode)) |
| 1530 | __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); | ||
| 1529 | } | 1531 | } |
| 1530 | kfree(iovec); | 1532 | kfree(iovec); |
| 1531 | return ret; | 1533 | return ret; |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 29a02daf08a9..e7bf01373bc4 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -1428,17 +1428,18 @@ static void fill_prstatus(struct elf_prstatus *prstatus, | |||
| 1428 | * group-wide total, not its individual thread total. | 1428 | * group-wide total, not its individual thread total. |
| 1429 | */ | 1429 | */ |
| 1430 | thread_group_cputime(p, &cputime); | 1430 | thread_group_cputime(p, &cputime); |
| 1431 | cputime_to_timeval(cputime.utime, &prstatus->pr_utime); | 1431 | prstatus->pr_utime = ns_to_timeval(cputime.utime); |
| 1432 | cputime_to_timeval(cputime.stime, &prstatus->pr_stime); | 1432 | prstatus->pr_stime = ns_to_timeval(cputime.stime); |
| 1433 | } else { | 1433 | } else { |
| 1434 | cputime_t utime, stime; | 1434 | u64 utime, stime; |
| 1435 | 1435 | ||
| 1436 | task_cputime(p, &utime, &stime); | 1436 | task_cputime(p, &utime, &stime); |
| 1437 | cputime_to_timeval(utime, &prstatus->pr_utime); | 1437 | prstatus->pr_utime = ns_to_timeval(utime); |
| 1438 | cputime_to_timeval(stime, &prstatus->pr_stime); | 1438 | prstatus->pr_stime = ns_to_timeval(stime); |
| 1439 | } | 1439 | } |
| 1440 | cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); | 1440 | |
| 1441 | cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); | 1441 | prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); |
| 1442 | prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); | ||
| 1442 | } | 1443 | } |
| 1443 | 1444 | ||
| 1444 | static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, | 1445 | static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, |
| @@ -2298,6 +2299,7 @@ static int elf_core_dump(struct coredump_params *cprm) | |||
| 2298 | goto end_coredump; | 2299 | goto end_coredump; |
| 2299 | } | 2300 | } |
| 2300 | } | 2301 | } |
| 2302 | dump_truncate(cprm); | ||
| 2301 | 2303 | ||
| 2302 | if (!elf_core_write_extra_data(cprm)) | 2304 | if (!elf_core_write_extra_data(cprm)) |
| 2303 | goto end_coredump; | 2305 | goto end_coredump; |
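The fill_prstatus() hunk above, like its twin in binfmt_elf_fdpic.c below, replaces cputime_t values and cputime_to_timeval() with plain u64 nanosecond counters converted through ns_to_timeval(). A minimal userspace sketch of what such a nanoseconds-to-timeval conversion amounts to, assuming non-negative inputs (the in-kernel helper is more general):

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

/* Split a nanosecond count into whole seconds and microseconds. */
static struct timeval ns_to_timeval_sketch(uint64_t ns)
{
        struct timeval tv;

        tv.tv_sec  = ns / NSEC_PER_SEC;
        tv.tv_usec = (ns % NSEC_PER_SEC) / NSEC_PER_USEC;
        return tv;
}

int main(void)
{
        struct timeval tv = ns_to_timeval_sketch(1500000000ULL); /* 1.5 s */

        printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}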
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index d2e36f82c35d..ffca4bbc3d63 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
| @@ -1349,17 +1349,17 @@ static void fill_prstatus(struct elf_prstatus *prstatus, | |||
| 1349 | * group-wide total, not its individual thread total. | 1349 | * group-wide total, not its individual thread total. |
| 1350 | */ | 1350 | */ |
| 1351 | thread_group_cputime(p, &cputime); | 1351 | thread_group_cputime(p, &cputime); |
| 1352 | cputime_to_timeval(cputime.utime, &prstatus->pr_utime); | 1352 | prstatus->pr_utime = ns_to_timeval(cputime.utime); |
| 1353 | cputime_to_timeval(cputime.stime, &prstatus->pr_stime); | 1353 | prstatus->pr_stime = ns_to_timeval(cputime.stime); |
| 1354 | } else { | 1354 | } else { |
| 1355 | cputime_t utime, stime; | 1355 | u64 utime, stime; |
| 1356 | 1356 | ||
| 1357 | task_cputime(p, &utime, &stime); | 1357 | task_cputime(p, &utime, &stime); |
| 1358 | cputime_to_timeval(utime, &prstatus->pr_utime); | 1358 | prstatus->pr_utime = ns_to_timeval(utime); |
| 1359 | cputime_to_timeval(stime, &prstatus->pr_stime); | 1359 | prstatus->pr_stime = ns_to_timeval(stime); |
| 1360 | } | 1360 | } |
| 1361 | cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); | 1361 | prstatus->pr_cutime = ns_to_timeval(p->signal->cutime); |
| 1362 | cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); | 1362 | prstatus->pr_cstime = ns_to_timeval(p->signal->cstime); |
| 1363 | 1363 | ||
| 1364 | prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap; | 1364 | prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap; |
| 1365 | prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap; | 1365 | prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap; |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 6254cee8f8f3..73031ec54a7b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -328,9 +328,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
| 328 | struct file *file = iocb->ki_filp; | 328 | struct file *file = iocb->ki_filp; |
| 329 | struct inode *inode = bdev_file_inode(file); | 329 | struct inode *inode = bdev_file_inode(file); |
| 330 | struct block_device *bdev = I_BDEV(inode); | 330 | struct block_device *bdev = I_BDEV(inode); |
| 331 | struct blk_plug plug; | ||
| 331 | struct blkdev_dio *dio; | 332 | struct blkdev_dio *dio; |
| 332 | struct bio *bio; | 333 | struct bio *bio; |
| 333 | bool is_read = (iov_iter_rw(iter) == READ); | 334 | bool is_read = (iov_iter_rw(iter) == READ), is_sync; |
| 334 | loff_t pos = iocb->ki_pos; | 335 | loff_t pos = iocb->ki_pos; |
| 335 | blk_qc_t qc = BLK_QC_T_NONE; | 336 | blk_qc_t qc = BLK_QC_T_NONE; |
| 336 | int ret; | 337 | int ret; |
| @@ -343,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
| 343 | bio_get(bio); /* extra ref for the completion handler */ | 344 | bio_get(bio); /* extra ref for the completion handler */ |
| 344 | 345 | ||
| 345 | dio = container_of(bio, struct blkdev_dio, bio); | 346 | dio = container_of(bio, struct blkdev_dio, bio); |
| 346 | dio->is_sync = is_sync_kiocb(iocb); | 347 | dio->is_sync = is_sync = is_sync_kiocb(iocb); |
| 347 | if (dio->is_sync) | 348 | if (dio->is_sync) |
| 348 | dio->waiter = current; | 349 | dio->waiter = current; |
| 349 | else | 350 | else |
| @@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
| 353 | dio->multi_bio = false; | 354 | dio->multi_bio = false; |
| 354 | dio->should_dirty = is_read && (iter->type == ITER_IOVEC); | 355 | dio->should_dirty = is_read && (iter->type == ITER_IOVEC); |
| 355 | 356 | ||
| 357 | blk_start_plug(&plug); | ||
| 356 | for (;;) { | 358 | for (;;) { |
| 357 | bio->bi_bdev = bdev; | 359 | bio->bi_bdev = bdev; |
| 358 | bio->bi_iter.bi_sector = pos >> 9; | 360 | bio->bi_iter.bi_sector = pos >> 9; |
| @@ -394,8 +396,9 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) | |||
| 394 | submit_bio(bio); | 396 | submit_bio(bio); |
| 395 | bio = bio_alloc(GFP_KERNEL, nr_pages); | 397 | bio = bio_alloc(GFP_KERNEL, nr_pages); |
| 396 | } | 398 | } |
| 399 | blk_finish_plug(&plug); | ||
| 397 | 400 | ||
| 398 | if (!dio->is_sync) | 401 | if (!is_sync) |
| 399 | return -EIOCBQUEUED; | 402 | return -EIOCBQUEUED; |
| 400 | 403 | ||
| 401 | for (;;) { | 404 | for (;;) { |
| @@ -881,6 +884,8 @@ static void bdev_evict_inode(struct inode *inode) | |||
| 881 | spin_lock(&bdev_lock); | 884 | spin_lock(&bdev_lock); |
| 882 | list_del_init(&bdev->bd_list); | 885 | list_del_init(&bdev->bd_list); |
| 883 | spin_unlock(&bdev_lock); | 886 | spin_unlock(&bdev_lock); |
| 887 | if (bdev->bd_bdi != &noop_backing_dev_info) | ||
| 888 | bdi_put(bdev->bd_bdi); | ||
| 884 | } | 889 | } |
| 885 | 890 | ||
| 886 | static const struct super_operations bdev_sops = { | 891 | static const struct super_operations bdev_sops = { |
| @@ -951,6 +956,21 @@ static int bdev_set(struct inode *inode, void *data) | |||
| 951 | 956 | ||
| 952 | static LIST_HEAD(all_bdevs); | 957 | static LIST_HEAD(all_bdevs); |
| 953 | 958 | ||
| 959 | /* | ||
| 960 | * If there is a bdev inode for this device, unhash it so that it gets evicted | ||
| 961 | * as soon as last inode reference is dropped. | ||
| 962 | */ | ||
| 963 | void bdev_unhash_inode(dev_t dev) | ||
| 964 | { | ||
| 965 | struct inode *inode; | ||
| 966 | |||
| 967 | inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev); | ||
| 968 | if (inode) { | ||
| 969 | remove_inode_hash(inode); | ||
| 970 | iput(inode); | ||
| 971 | } | ||
| 972 | } | ||
| 973 | |||
| 954 | struct block_device *bdget(dev_t dev) | 974 | struct block_device *bdget(dev_t dev) |
| 955 | { | 975 | { |
| 956 | struct block_device *bdev; | 976 | struct block_device *bdev; |
| @@ -968,6 +988,7 @@ struct block_device *bdget(dev_t dev) | |||
| 968 | bdev->bd_contains = NULL; | 988 | bdev->bd_contains = NULL; |
| 969 | bdev->bd_super = NULL; | 989 | bdev->bd_super = NULL; |
| 970 | bdev->bd_inode = inode; | 990 | bdev->bd_inode = inode; |
| 991 | bdev->bd_bdi = &noop_backing_dev_info; | ||
| 971 | bdev->bd_block_size = (1 << inode->i_blkbits); | 992 | bdev->bd_block_size = (1 << inode->i_blkbits); |
| 972 | bdev->bd_part_count = 0; | 993 | bdev->bd_part_count = 0; |
| 973 | bdev->bd_invalidated = 0; | 994 | bdev->bd_invalidated = 0; |
| @@ -1524,6 +1545,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1524 | bdev->bd_disk = disk; | 1545 | bdev->bd_disk = disk; |
| 1525 | bdev->bd_queue = disk->queue; | 1546 | bdev->bd_queue = disk->queue; |
| 1526 | bdev->bd_contains = bdev; | 1547 | bdev->bd_contains = bdev; |
| 1548 | if (bdev->bd_bdi == &noop_backing_dev_info) | ||
| 1549 | bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); | ||
| 1527 | 1550 | ||
| 1528 | if (!partno) { | 1551 | if (!partno) { |
| 1529 | ret = -ENXIO; | 1552 | ret = -ENXIO; |
| @@ -1619,6 +1642,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1619 | bdev->bd_disk = NULL; | 1642 | bdev->bd_disk = NULL; |
| 1620 | bdev->bd_part = NULL; | 1643 | bdev->bd_part = NULL; |
| 1621 | bdev->bd_queue = NULL; | 1644 | bdev->bd_queue = NULL; |
| 1645 | bdi_put(bdev->bd_bdi); | ||
| 1646 | bdev->bd_bdi = &noop_backing_dev_info; | ||
| 1622 | if (bdev != bdev->bd_contains) | 1647 | if (bdev != bdev->bd_contains) |
| 1623 | __blkdev_put(bdev->bd_contains, mode, 1); | 1648 | __blkdev_put(bdev->bd_contains, mode, 1); |
| 1624 | bdev->bd_contains = NULL; | 1649 | bdev->bd_contains = NULL; |
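The block_dev.c changes above make every block_device hold a counted reference to its backing_dev_info: bd_bdi starts out pointing at noop_backing_dev_info, is switched to bdi_get(disk->queue->backing_dev_info) on first open, and is dropped again with bdi_put() on the error path and at inode eviction. Callers such as the btrfs hunks further down can then read bdev->bd_bdi directly instead of calling blk_get_backing_dev_info(). A minimal userspace sketch of that "default to a static no-op object, pin the real one while open" pattern; the names below are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins, not the kernel's types. */
struct bdi {
        int refcount;           /* the static no-op object is never counted */
        const char *name;
};

static struct bdi noop_bdi = { .refcount = 0, .name = "noop" };

static struct bdi *bdi_get_sketch(struct bdi *b)
{
        if (b != &noop_bdi)
                b->refcount++;
        return b;
}

static void bdi_put_sketch(struct bdi *b)
{
        if (b != &noop_bdi && --b->refcount == 0)
                free(b);
}

struct bdev {
        struct bdi *bd_bdi;     /* defaults to &noop_bdi, like bd_bdi */
};

static void bdev_open(struct bdev *dev, struct bdi *queue_bdi)
{
        if (dev->bd_bdi == &noop_bdi)
                dev->bd_bdi = bdi_get_sketch(queue_bdi);
}

static void bdev_close(struct bdev *dev)
{
        bdi_put_sketch(dev->bd_bdi);
        dev->bd_bdi = &noop_bdi;
}

int main(void)
{
        struct bdi *real = malloc(sizeof(*real));
        struct bdev dev = { .bd_bdi = &noop_bdi };

        real->refcount = 1;     /* the queue's own reference */
        real->name = "queue-bdi";

        bdev_open(&dev, real);
        printf("open: using %s\n", dev.bd_bdi->name);
        bdev_close(&dev);
        printf("closed: back to %s\n", dev.bd_bdi->name);

        bdi_put_sketch(real);   /* drop the queue's reference, frees it */
        return 0;
}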
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 63d197724519..ff0b0be92d61 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
| @@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
| 273 | unsigned long flags; | 273 | unsigned long flags; |
| 274 | 274 | ||
| 275 | while (1) { | 275 | while (1) { |
| 276 | void *wtag; | ||
| 277 | |||
| 276 | spin_lock_irqsave(lock, flags); | 278 | spin_lock_irqsave(lock, flags); |
| 277 | if (list_empty(list)) | 279 | if (list_empty(list)) |
| 278 | break; | 280 | break; |
| @@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
| 299 | spin_unlock_irqrestore(lock, flags); | 301 | spin_unlock_irqrestore(lock, flags); |
| 300 | 302 | ||
| 301 | /* | 303 | /* |
| 302 | * we don't want to call the ordered free functions | 304 | * We don't want to call the ordered free functions with the |
| 303 | * with the lock held though | 305 | * lock held though. Save the work as tag for the trace event, |
| 306 | * because the callback could free the structure. | ||
| 304 | */ | 307 | */ |
| 308 | wtag = work; | ||
| 305 | work->ordered_free(work); | 309 | work->ordered_free(work); |
| 306 | trace_btrfs_all_work_done(work); | 310 | trace_btrfs_all_work_done(wq->fs_info, wtag); |
| 307 | } | 311 | } |
| 308 | spin_unlock_irqrestore(lock, flags); | 312 | spin_unlock_irqrestore(lock, flags); |
| 309 | } | 313 | } |
| @@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
| 311 | static void normal_work_helper(struct btrfs_work *work) | 315 | static void normal_work_helper(struct btrfs_work *work) |
| 312 | { | 316 | { |
| 313 | struct __btrfs_workqueue *wq; | 317 | struct __btrfs_workqueue *wq; |
| 318 | void *wtag; | ||
| 314 | int need_order = 0; | 319 | int need_order = 0; |
| 315 | 320 | ||
| 316 | /* | 321 | /* |
| @@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work) | |||
| 324 | if (work->ordered_func) | 329 | if (work->ordered_func) |
| 325 | need_order = 1; | 330 | need_order = 1; |
| 326 | wq = work->wq; | 331 | wq = work->wq; |
| 332 | /* Safe for tracepoints in case work gets freed by the callback */ | ||
| 333 | wtag = work; | ||
| 327 | 334 | ||
| 328 | trace_btrfs_work_sched(work); | 335 | trace_btrfs_work_sched(work); |
| 329 | thresh_exec_hook(wq); | 336 | thresh_exec_hook(wq); |
| @@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work) | |||
| 333 | run_ordered_work(wq); | 340 | run_ordered_work(wq); |
| 334 | } | 341 | } |
| 335 | if (!need_order) | 342 | if (!need_order) |
| 336 | trace_btrfs_all_work_done(work); | 343 | trace_btrfs_all_work_done(wq->fs_info, wtag); |
| 337 | } | 344 | } |
| 338 | 345 | ||
| 339 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, | 346 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, |
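Both async-thread.c hunks above deal with the same hazard: ordered_free() may free the btrfs_work structure, so its address is captured beforehand as an opaque tag (wtag) and only that value, never the structure's contents, is handed to trace_btrfs_all_work_done(). A minimal userspace sketch of the pattern, using hypothetical names rather than the btrfs API:

#include <stdio.h>
#include <stdlib.h>

struct work {
        void (*free_fn)(struct work *);
        int payload;
};

static void work_free(struct work *w)
{
        free(w);
}

/* The trace call only formats the pointer value; it never dereferences it. */
static void trace_done(const void *tag)
{
        printf("work %p done\n", tag);
}

int main(void)
{
        struct work *w = malloc(sizeof(*w));
        void *wtag;

        w->free_fn = work_free;
        w->payload = 42;

        wtag = w;               /* capture the tag before the callback runs */
        w->free_fn(w);          /* may free w ...                           */
        trace_done(wtag);       /* ... so only the saved pointer value is used */
        return 0;
}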
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7f390849343b..c4444d6f439f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
| @@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
| 1024 | unsigned long buf_offset; | 1024 | unsigned long buf_offset; |
| 1025 | unsigned long current_buf_start; | 1025 | unsigned long current_buf_start; |
| 1026 | unsigned long start_byte; | 1026 | unsigned long start_byte; |
| 1027 | unsigned long prev_start_byte; | ||
| 1027 | unsigned long working_bytes = total_out - buf_start; | 1028 | unsigned long working_bytes = total_out - buf_start; |
| 1028 | unsigned long bytes; | 1029 | unsigned long bytes; |
| 1029 | char *kaddr; | 1030 | char *kaddr; |
| @@ -1071,26 +1072,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
| 1071 | if (!bio->bi_iter.bi_size) | 1072 | if (!bio->bi_iter.bi_size) |
| 1072 | return 0; | 1073 | return 0; |
| 1073 | bvec = bio_iter_iovec(bio, bio->bi_iter); | 1074 | bvec = bio_iter_iovec(bio, bio->bi_iter); |
| 1074 | 1075 | prev_start_byte = start_byte; | |
| 1075 | start_byte = page_offset(bvec.bv_page) - disk_start; | 1076 | start_byte = page_offset(bvec.bv_page) - disk_start; |
| 1076 | 1077 | ||
| 1077 | /* | 1078 | /* |
| 1078 | * make sure our new page is covered by this | 1079 | * We need to make sure we're only adjusting |
| 1079 | * working buffer | 1080 | * our offset into compression working buffer when |
| 1081 | * we're switching pages. Otherwise we can incorrectly | ||
| 1082 | * keep copying when we were actually done. | ||
| 1080 | */ | 1083 | */ |
| 1081 | if (total_out <= start_byte) | 1084 | if (start_byte != prev_start_byte) { |
| 1082 | return 1; | 1085 | /* |
| 1086 | * make sure our new page is covered by this | ||
| 1087 | * working buffer | ||
| 1088 | */ | ||
| 1089 | if (total_out <= start_byte) | ||
| 1090 | return 1; | ||
| 1083 | 1091 | ||
| 1084 | /* | 1092 | /* |
| 1085 | * the next page in the biovec might not be adjacent | 1093 | * the next page in the biovec might not be adjacent |
| 1086 | * to the last page, but it might still be found | 1094 | * to the last page, but it might still be found |
| 1087 | * inside this working buffer. bump our offset pointer | 1095 | * inside this working buffer. bump our offset pointer |
| 1088 | */ | 1096 | */ |
| 1089 | if (total_out > start_byte && | 1097 | if (total_out > start_byte && |
| 1090 | current_buf_start < start_byte) { | 1098 | current_buf_start < start_byte) { |
| 1091 | buf_offset = start_byte - buf_start; | 1099 | buf_offset = start_byte - buf_start; |
| 1092 | working_bytes = total_out - start_byte; | 1100 | working_bytes = total_out - start_byte; |
| 1093 | current_buf_start = buf_start + buf_offset; | 1101 | current_buf_start = buf_start + buf_offset; |
| 1102 | } | ||
| 1094 | } | 1103 | } |
| 1095 | } | 1104 | } |
| 1096 | 1105 | ||
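The reworked loop in compression.c above only re-derives buf_offset and working_bytes when the bio has moved on to a different destination page (start_byte != prev_start_byte); repeating that adjustment for the same page could keep copying after the working buffer was already consumed. The guard is simply "act on a transition, not on every iteration", sketched here on plain integers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Destination offsets seen while walking a bio; repeats mean "same page". */
        uint64_t start_bytes[] = { 0, 0, 4096, 4096, 8192 };
        uint64_t prev = (uint64_t)-1;   /* sentinel: no page seen yet */

        for (int i = 0; i < 5; i++) {
                uint64_t cur = start_bytes[i];

                if (cur != prev)
                        printf("page at %llu: recompute buffer offset\n",
                               (unsigned long long)cur);
                else
                        printf("page at %llu: keep current offset\n",
                               (unsigned long long)cur);
                prev = cur;
        }
        return 0;
}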
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 18004169552c..37a31b12bb0c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -1800,7 +1800,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) | |||
| 1800 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { | 1800 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { |
| 1801 | if (!device->bdev) | 1801 | if (!device->bdev) |
| 1802 | continue; | 1802 | continue; |
| 1803 | bdi = blk_get_backing_dev_info(device->bdev); | 1803 | bdi = device->bdev->bd_bdi; |
| 1804 | if (bdi_congested(bdi, bdi_bits)) { | 1804 | if (bdi_congested(bdi, bdi_bits)) { |
| 1805 | ret = 1; | 1805 | ret = 1; |
| 1806 | break; | 1806 | break; |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e97302f437a1..dcd2e798767e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2522 | if (ref && ref->seq && | 2522 | if (ref && ref->seq && |
| 2523 | btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { | 2523 | btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { |
| 2524 | spin_unlock(&locked_ref->lock); | 2524 | spin_unlock(&locked_ref->lock); |
| 2525 | btrfs_delayed_ref_unlock(locked_ref); | ||
| 2526 | spin_lock(&delayed_refs->lock); | 2525 | spin_lock(&delayed_refs->lock); |
| 2527 | locked_ref->processing = 0; | 2526 | locked_ref->processing = 0; |
| 2528 | delayed_refs->num_heads_ready++; | 2527 | delayed_refs->num_heads_ready++; |
| 2529 | spin_unlock(&delayed_refs->lock); | 2528 | spin_unlock(&delayed_refs->lock); |
| 2529 | btrfs_delayed_ref_unlock(locked_ref); | ||
| 2530 | locked_ref = NULL; | 2530 | locked_ref = NULL; |
| 2531 | cond_resched(); | 2531 | cond_resched(); |
| 2532 | count++; | 2532 | count++; |
| @@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2572 | */ | 2572 | */ |
| 2573 | if (must_insert_reserved) | 2573 | if (must_insert_reserved) |
| 2574 | locked_ref->must_insert_reserved = 1; | 2574 | locked_ref->must_insert_reserved = 1; |
| 2575 | spin_lock(&delayed_refs->lock); | ||
| 2575 | locked_ref->processing = 0; | 2576 | locked_ref->processing = 0; |
| 2577 | delayed_refs->num_heads_ready++; | ||
| 2578 | spin_unlock(&delayed_refs->lock); | ||
| 2576 | btrfs_debug(fs_info, | 2579 | btrfs_debug(fs_info, |
| 2577 | "run_delayed_extent_op returned %d", | 2580 | "run_delayed_extent_op returned %d", |
| 2578 | ret); | 2581 | ret); |
| @@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, | |||
| 7384 | 7387 | ||
| 7385 | spin_unlock(&cluster->refill_lock); | 7388 | spin_unlock(&cluster->refill_lock); |
| 7386 | 7389 | ||
| 7387 | down_read(&used_bg->data_rwsem); | 7390 | /* We should only have one-level nested. */ |
| 7391 | down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); | ||
| 7388 | 7392 | ||
| 7389 | spin_lock(&cluster->refill_lock); | 7393 | spin_lock(&cluster->refill_lock); |
| 7390 | if (used_bg == cluster->block_group) | 7394 | if (used_bg == cluster->block_group) |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f2b281ad7af6..1e861a063721 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -3835,10 +3835,7 @@ cache_acl: | |||
| 3835 | break; | 3835 | break; |
| 3836 | case S_IFDIR: | 3836 | case S_IFDIR: |
| 3837 | inode->i_fop = &btrfs_dir_file_operations; | 3837 | inode->i_fop = &btrfs_dir_file_operations; |
| 3838 | if (root == fs_info->tree_root) | 3838 | inode->i_op = &btrfs_dir_inode_operations; |
| 3839 | inode->i_op = &btrfs_dir_ro_inode_operations; | ||
| 3840 | else | ||
| 3841 | inode->i_op = &btrfs_dir_inode_operations; | ||
| 3842 | break; | 3839 | break; |
| 3843 | case S_IFLNK: | 3840 | case S_IFLNK: |
| 3844 | inode->i_op = &btrfs_symlink_inode_operations; | 3841 | inode->i_op = &btrfs_symlink_inode_operations; |
| @@ -4505,8 +4502,19 @@ search_again: | |||
| 4505 | if (found_type > min_type) { | 4502 | if (found_type > min_type) { |
| 4506 | del_item = 1; | 4503 | del_item = 1; |
| 4507 | } else { | 4504 | } else { |
| 4508 | if (item_end < new_size) | 4505 | if (item_end < new_size) { |
| 4506 | /* | ||
| 4507 | * With NO_HOLES mode, for the following mapping | ||
| 4508 | * | ||
| 4509 | * [0-4k][hole][8k-12k] | ||
| 4510 | * | ||
| 4511 | * if truncating isize down to 6k, it ends up | ||
| 4512 | * isize being 8k. | ||
| 4513 | */ | ||
| 4514 | if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) | ||
| 4515 | last_size = new_size; | ||
| 4509 | break; | 4516 | break; |
| 4517 | } | ||
| 4510 | if (found_key.offset >= new_size) | 4518 | if (found_key.offset >= new_size) |
| 4511 | del_item = 1; | 4519 | del_item = 1; |
| 4512 | else | 4520 | else |
| @@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s, | |||
| 5710 | 5718 | ||
| 5711 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; | 5719 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; |
| 5712 | inode->i_op = &btrfs_dir_ro_inode_operations; | 5720 | inode->i_op = &btrfs_dir_ro_inode_operations; |
| 5721 | inode->i_opflags &= ~IOP_XATTR; | ||
| 5713 | inode->i_fop = &simple_dir_operations; | 5722 | inode->i_fop = &simple_dir_operations; |
| 5714 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; | 5723 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; |
| 5715 | inode->i_mtime = current_time(inode); | 5724 | inode->i_mtime = current_time(inode); |
| @@ -7059,7 +7068,7 @@ insert: | |||
| 7059 | write_unlock(&em_tree->lock); | 7068 | write_unlock(&em_tree->lock); |
| 7060 | out: | 7069 | out: |
| 7061 | 7070 | ||
| 7062 | trace_btrfs_get_extent(root, em); | 7071 | trace_btrfs_get_extent(root, inode, em); |
| 7063 | 7072 | ||
| 7064 | btrfs_free_path(path); | 7073 | btrfs_free_path(path); |
| 7065 | if (trans) { | 7074 | if (trans) { |
| @@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, | |||
| 7215 | struct extent_map *em = NULL; | 7224 | struct extent_map *em = NULL; |
| 7216 | int ret; | 7225 | int ret; |
| 7217 | 7226 | ||
| 7218 | down_read(&BTRFS_I(inode)->dio_sem); | ||
| 7219 | if (type != BTRFS_ORDERED_NOCOW) { | 7227 | if (type != BTRFS_ORDERED_NOCOW) { |
| 7220 | em = create_pinned_em(inode, start, len, orig_start, | 7228 | em = create_pinned_em(inode, start, len, orig_start, |
| 7221 | block_start, block_len, orig_block_len, | 7229 | block_start, block_len, orig_block_len, |
| @@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, | |||
| 7234 | em = ERR_PTR(ret); | 7242 | em = ERR_PTR(ret); |
| 7235 | } | 7243 | } |
| 7236 | out: | 7244 | out: |
| 7237 | up_read(&BTRFS_I(inode)->dio_sem); | ||
| 7238 | 7245 | ||
| 7239 | return em; | 7246 | return em; |
| 7240 | } | 7247 | } |
| @@ -7623,11 +7630,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode, | |||
| 7623 | * within our reservation, otherwise we need to adjust our inode | 7630 | * within our reservation, otherwise we need to adjust our inode |
| 7624 | * counter appropriately. | 7631 | * counter appropriately. |
| 7625 | */ | 7632 | */ |
| 7626 | if (dio_data->outstanding_extents) { | 7633 | if (dio_data->outstanding_extents >= num_extents) { |
| 7627 | dio_data->outstanding_extents -= num_extents; | 7634 | dio_data->outstanding_extents -= num_extents; |
| 7628 | } else { | 7635 | } else { |
| 7636 | /* | ||
| 7637 | * If dio write length has been split due to no large enough | ||
| 7638 | * contiguous space, we need to compensate our inode counter | ||
| 7639 | * appropriately. | ||
| 7640 | */ | ||
| 7641 | u64 num_needed = num_extents - dio_data->outstanding_extents; | ||
| 7642 | |||
| 7629 | spin_lock(&BTRFS_I(inode)->lock); | 7643 | spin_lock(&BTRFS_I(inode)->lock); |
| 7630 | BTRFS_I(inode)->outstanding_extents += num_extents; | 7644 | BTRFS_I(inode)->outstanding_extents += num_needed; |
| 7631 | spin_unlock(&BTRFS_I(inode)->lock); | 7645 | spin_unlock(&BTRFS_I(inode)->lock); |
| 7632 | } | 7646 | } |
| 7633 | } | 7647 | } |
| @@ -8685,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) | |||
| 8685 | dio_data.unsubmitted_oe_range_start = (u64)offset; | 8699 | dio_data.unsubmitted_oe_range_start = (u64)offset; |
| 8686 | dio_data.unsubmitted_oe_range_end = (u64)offset; | 8700 | dio_data.unsubmitted_oe_range_end = (u64)offset; |
| 8687 | current->journal_info = &dio_data; | 8701 | current->journal_info = &dio_data; |
| 8702 | down_read(&BTRFS_I(inode)->dio_sem); | ||
| 8688 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | 8703 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
| 8689 | &BTRFS_I(inode)->runtime_flags)) { | 8704 | &BTRFS_I(inode)->runtime_flags)) { |
| 8690 | inode_dio_end(inode); | 8705 | inode_dio_end(inode); |
| @@ -8697,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) | |||
| 8697 | iter, btrfs_get_blocks_direct, NULL, | 8712 | iter, btrfs_get_blocks_direct, NULL, |
| 8698 | btrfs_submit_direct, flags); | 8713 | btrfs_submit_direct, flags); |
| 8699 | if (iov_iter_rw(iter) == WRITE) { | 8714 | if (iov_iter_rw(iter) == WRITE) { |
| 8715 | up_read(&BTRFS_I(inode)->dio_sem); | ||
| 8700 | current->journal_info = NULL; | 8716 | current->journal_info = NULL; |
| 8701 | if (ret < 0 && ret != -EIOCBQUEUED) { | 8717 | if (ret < 0 && ret != -EIOCBQUEUED) { |
| 8702 | if (dio_data.reserve) | 8718 | if (dio_data.reserve) |
| @@ -9205,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode) | |||
| 9205 | break; | 9221 | break; |
| 9206 | } | 9222 | } |
| 9207 | 9223 | ||
| 9224 | btrfs_block_rsv_release(fs_info, rsv, -1); | ||
| 9208 | ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, | 9225 | ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, |
| 9209 | rsv, min_size, 0); | 9226 | rsv, min_size, 0); |
| 9210 | BUG_ON(ret); /* shouldn't happen */ | 9227 | BUG_ON(ret); /* shouldn't happen */ |
| @@ -10572,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = { | |||
| 10572 | static const struct inode_operations btrfs_dir_ro_inode_operations = { | 10589 | static const struct inode_operations btrfs_dir_ro_inode_operations = { |
| 10573 | .lookup = btrfs_lookup, | 10590 | .lookup = btrfs_lookup, |
| 10574 | .permission = btrfs_permission, | 10591 | .permission = btrfs_permission, |
| 10575 | .get_acl = btrfs_get_acl, | ||
| 10576 | .set_acl = btrfs_set_acl, | ||
| 10577 | .update_time = btrfs_update_time, | 10592 | .update_time = btrfs_update_time, |
| 10578 | }; | 10593 | }; |
| 10579 | 10594 | ||
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 33f967d30b2a..21e51b0ba188 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
| @@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
| 5653 | #ifdef CONFIG_COMPAT | 5653 | #ifdef CONFIG_COMPAT |
| 5654 | long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 5654 | long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 5655 | { | 5655 | { |
| 5656 | /* | ||
| 5657 | * These all access 32-bit values anyway so no further | ||
| 5658 | * handling is necessary. | ||
| 5659 | */ | ||
| 5656 | switch (cmd) { | 5660 | switch (cmd) { |
| 5657 | case FS_IOC32_GETFLAGS: | 5661 | case FS_IOC32_GETFLAGS: |
| 5658 | cmd = FS_IOC_GETFLAGS; | 5662 | cmd = FS_IOC_GETFLAGS; |
| @@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 5663 | case FS_IOC32_GETVERSION: | 5667 | case FS_IOC32_GETVERSION: |
| 5664 | cmd = FS_IOC_GETVERSION; | 5668 | cmd = FS_IOC_GETVERSION; |
| 5665 | break; | 5669 | break; |
| 5666 | default: | ||
| 5667 | return -ENOIOCTLCMD; | ||
| 5668 | } | 5670 | } |
| 5669 | 5671 | ||
| 5670 | return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); | 5672 | return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f10bf5213ed8..eeffff84f280 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | */ | 37 | */ |
| 38 | #define LOG_INODE_ALL 0 | 38 | #define LOG_INODE_ALL 0 |
| 39 | #define LOG_INODE_EXISTS 1 | 39 | #define LOG_INODE_EXISTS 1 |
| 40 | #define LOG_OTHER_INODE 2 | ||
| 40 | 41 | ||
| 41 | /* | 42 | /* |
| 42 | * directory trouble cases | 43 | * directory trouble cases |
| @@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
| 4641 | if (S_ISDIR(inode->i_mode) || | 4642 | if (S_ISDIR(inode->i_mode) || |
| 4642 | (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | 4643 | (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
| 4643 | &BTRFS_I(inode)->runtime_flags) && | 4644 | &BTRFS_I(inode)->runtime_flags) && |
| 4644 | inode_only == LOG_INODE_EXISTS)) | 4645 | inode_only >= LOG_INODE_EXISTS)) |
| 4645 | max_key.type = BTRFS_XATTR_ITEM_KEY; | 4646 | max_key.type = BTRFS_XATTR_ITEM_KEY; |
| 4646 | else | 4647 | else |
| 4647 | max_key.type = (u8)-1; | 4648 | max_key.type = (u8)-1; |
| @@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
| 4665 | return ret; | 4666 | return ret; |
| 4666 | } | 4667 | } |
| 4667 | 4668 | ||
| 4668 | mutex_lock(&BTRFS_I(inode)->log_mutex); | 4669 | if (inode_only == LOG_OTHER_INODE) { |
| 4670 | inode_only = LOG_INODE_EXISTS; | ||
| 4671 | mutex_lock_nested(&BTRFS_I(inode)->log_mutex, | ||
| 4672 | SINGLE_DEPTH_NESTING); | ||
| 4673 | } else { | ||
| 4674 | mutex_lock(&BTRFS_I(inode)->log_mutex); | ||
| 4675 | } | ||
| 4669 | 4676 | ||
| 4670 | /* | 4677 | /* |
| 4671 | * a brute force approach to making sure we get the most uptodate | 4678 | * a brute force approach to making sure we get the most uptodate |
| @@ -4817,7 +4824,7 @@ again: | |||
| 4817 | * unpin it. | 4824 | * unpin it. |
| 4818 | */ | 4825 | */ |
| 4819 | err = btrfs_log_inode(trans, root, other_inode, | 4826 | err = btrfs_log_inode(trans, root, other_inode, |
| 4820 | LOG_INODE_EXISTS, | 4827 | LOG_OTHER_INODE, |
| 4821 | 0, LLONG_MAX, ctx); | 4828 | 0, LLONG_MAX, ctx); |
| 4822 | iput(other_inode); | 4829 | iput(other_inode); |
| 4823 | if (err) | 4830 | if (err) |
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 161342b73ce5..726f928238d0 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c | |||
| @@ -352,7 +352,5 @@ skip: | |||
| 352 | 352 | ||
| 353 | out: | 353 | out: |
| 354 | btrfs_free_path(path); | 354 | btrfs_free_path(path); |
| 355 | if (ret) | 355 | return ret; |
| 356 | btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret); | ||
| 357 | return 0; | ||
| 358 | } | 356 | } |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3c3c69c0eee4..b2e70073a10d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -366,7 +366,7 @@ static noinline void run_scheduled_bios(struct btrfs_device *device) | |||
| 366 | */ | 366 | */ |
| 367 | blk_start_plug(&plug); | 367 | blk_start_plug(&plug); |
| 368 | 368 | ||
| 369 | bdi = blk_get_backing_dev_info(device->bdev); | 369 | bdi = device->bdev->bd_bdi; |
| 370 | limit = btrfs_async_submit_limit(fs_info); | 370 | limit = btrfs_async_submit_limit(fs_info); |
| 371 | limit = limit * 2 / 3; | 371 | limit = limit * 2 / 3; |
| 372 | 372 | ||
diff --git a/fs/buffer.c b/fs/buffer.c index d21771fcf7d3..0e87401cf335 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
| @@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) | |||
| 1660 | head = page_buffers(page); | 1660 | head = page_buffers(page); |
| 1661 | bh = head; | 1661 | bh = head; |
| 1662 | do { | 1662 | do { |
| 1663 | if (!buffer_mapped(bh)) | 1663 | if (!buffer_mapped(bh) || (bh->b_blocknr < block)) |
| 1664 | goto next; | 1664 | goto next; |
| 1665 | if (bh->b_blocknr >= block + len) | 1665 | if (bh->b_blocknr >= block + len) |
| 1666 | break; | 1666 | break; |
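The clean_bdev_aliases() fix above limits the buffer-head walk to the half-open block range [block, block + len): buffers below the start are now skipped by the new bh->b_blocknr < block test, just as buffers at or past the end already terminated the loop. The predicate reduces to an ordinary range check, sketched here:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* True when blocknr falls inside the half-open range [block, block + len). */
static bool in_range(uint64_t blocknr, uint64_t block, uint64_t len)
{
        return blocknr >= block && blocknr < block + len;
}

int main(void)
{
        /* Clean blocks 100..103 (len = 4); everything else is left alone. */
        for (uint64_t b = 98; b < 106; b++)
                printf("block %llu: %s\n", (unsigned long long)b,
                       in_range(b, 100, 4) ? "clean" : "skip");
        return 0;
}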
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 9cd0c0ea7cdb..e4b066cd912a 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
| @@ -502,9 +502,9 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode, | |||
| 502 | dout(" head snapc %p has %d dirty pages\n", | 502 | dout(" head snapc %p has %d dirty pages\n", |
| 503 | snapc, ci->i_wrbuffer_ref_head); | 503 | snapc, ci->i_wrbuffer_ref_head); |
| 504 | if (truncate_size) | 504 | if (truncate_size) |
| 505 | *truncate_size = capsnap->truncate_size; | 505 | *truncate_size = ci->i_truncate_size; |
| 506 | if (truncate_seq) | 506 | if (truncate_seq) |
| 507 | *truncate_seq = capsnap->truncate_seq; | 507 | *truncate_seq = ci->i_truncate_seq; |
| 508 | } | 508 | } |
| 509 | spin_unlock(&ci->i_ceph_lock); | 509 | spin_unlock(&ci->i_ceph_lock); |
| 510 | return snapc; | 510 | return snapc; |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index baea866a6751..94fd76d04683 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
| @@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, | |||
| 2591 | add_wait_queue(&ci->i_cap_wq, &wait); | 2591 | add_wait_queue(&ci->i_cap_wq, &wait); |
| 2592 | 2592 | ||
| 2593 | while (!try_get_cap_refs(ci, need, want, endoff, | 2593 | while (!try_get_cap_refs(ci, need, want, endoff, |
| 2594 | true, &_got, &err)) | 2594 | true, &_got, &err)) { |
| 2595 | if (signal_pending(current)) { | ||
| 2596 | ret = -ERESTARTSYS; | ||
| 2597 | break; | ||
| 2598 | } | ||
| 2595 | wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | 2599 | wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
| 2600 | } | ||
| 2596 | 2601 | ||
| 2597 | remove_wait_queue(&ci->i_cap_wq, &wait); | 2602 | remove_wait_queue(&ci->i_cap_wq, &wait); |
| 2598 | 2603 | ||
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index d7a93696663b..8ab1fdf0bd49 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
| @@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) | |||
| 1230 | struct ceph_mds_client *mdsc = | 1230 | struct ceph_mds_client *mdsc = |
| 1231 | ceph_sb_to_client(dir->i_sb)->mdsc; | 1231 | ceph_sb_to_client(dir->i_sb)->mdsc; |
| 1232 | struct ceph_mds_request *req; | 1232 | struct ceph_mds_request *req; |
| 1233 | int op, mask, err; | 1233 | int op, err; |
| 1234 | u32 mask; | ||
| 1234 | 1235 | ||
| 1235 | if (flags & LOOKUP_RCU) | 1236 | if (flags & LOOKUP_RCU) |
| 1236 | return -ECHILD; | 1237 | return -ECHILD; |
| @@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) | |||
| 1245 | mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; | 1246 | mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; |
| 1246 | if (ceph_security_xattr_wanted(dir)) | 1247 | if (ceph_security_xattr_wanted(dir)) |
| 1247 | mask |= CEPH_CAP_XATTR_SHARED; | 1248 | mask |= CEPH_CAP_XATTR_SHARED; |
| 1248 | req->r_args.getattr.mask = mask; | 1249 | req->r_args.getattr.mask = cpu_to_le32(mask); |
| 1249 | 1250 | ||
| 1250 | err = ceph_mdsc_do_request(mdsc, NULL, req); | 1251 | err = ceph_mdsc_do_request(mdsc, NULL, req); |
| 1251 | switch (err) { | 1252 | switch (err) { |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 398e5328b309..5e659d054b40 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r) | |||
| 305 | { | 305 | { |
| 306 | struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; | 306 | struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; |
| 307 | struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; | 307 | struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; |
| 308 | return ceph_frag_compare(ls->frag, rs->frag); | 308 | return ceph_frag_compare(le32_to_cpu(ls->frag), |
| 309 | le32_to_cpu(rs->frag)); | ||
| 309 | } | 310 | } |
| 310 | 311 | ||
| 311 | static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) | 312 | static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 4f49253387a0..c9d2e553a6c4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
| @@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end, | |||
| 288 | struct ceph_mds_reply_info_parsed *info, | 288 | struct ceph_mds_reply_info_parsed *info, |
| 289 | u64 features) | 289 | u64 features) |
| 290 | { | 290 | { |
| 291 | if (info->head->op == CEPH_MDS_OP_GETFILELOCK) | 291 | u32 op = le32_to_cpu(info->head->op); |
| 292 | |||
| 293 | if (op == CEPH_MDS_OP_GETFILELOCK) | ||
| 292 | return parse_reply_info_filelock(p, end, info, features); | 294 | return parse_reply_info_filelock(p, end, info, features); |
| 293 | else if (info->head->op == CEPH_MDS_OP_READDIR || | 295 | else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) |
| 294 | info->head->op == CEPH_MDS_OP_LSSNAP) | ||
| 295 | return parse_reply_info_dir(p, end, info, features); | 296 | return parse_reply_info_dir(p, end, info, features); |
| 296 | else if (info->head->op == CEPH_MDS_OP_CREATE) | 297 | else if (op == CEPH_MDS_OP_CREATE) |
| 297 | return parse_reply_info_create(p, end, info, features); | 298 | return parse_reply_info_create(p, end, info, features); |
| 298 | else | 299 | else |
| 299 | return -EIO; | 300 | return -EIO; |
| @@ -2106,6 +2107,11 @@ static int __do_request(struct ceph_mds_client *mdsc, | |||
| 2106 | dout("do_request mdsmap err %d\n", err); | 2107 | dout("do_request mdsmap err %d\n", err); |
| 2107 | goto finish; | 2108 | goto finish; |
| 2108 | } | 2109 | } |
| 2110 | if (mdsc->mdsmap->m_epoch == 0) { | ||
| 2111 | dout("do_request no mdsmap, waiting for map\n"); | ||
| 2112 | list_add(&req->r_wait, &mdsc->waiting_for_map); | ||
| 2113 | goto finish; | ||
| 2114 | } | ||
| 2109 | if (!(mdsc->fsc->mount_options->flags & | 2115 | if (!(mdsc->fsc->mount_options->flags & |
| 2110 | CEPH_MOUNT_OPT_MOUNTWAIT) && | 2116 | CEPH_MOUNT_OPT_MOUNTWAIT) && |
| 2111 | !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { | 2117 | !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index e7b478b49985..034f00f21390 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
| @@ -9,8 +9,6 @@ config CIFS | |||
| 9 | select CRYPTO_ARC4 | 9 | select CRYPTO_ARC4 |
| 10 | select CRYPTO_ECB | 10 | select CRYPTO_ECB |
| 11 | select CRYPTO_DES | 11 | select CRYPTO_DES |
| 12 | select CRYPTO_SHA256 | ||
| 13 | select CRYPTO_CMAC | ||
| 14 | help | 12 | help |
| 15 | This is the client VFS module for the Common Internet File System | 13 | This is the client VFS module for the Common Internet File System |
| 16 | (CIFS) protocol which is the successor to the Server Message Block | 14 | (CIFS) protocol which is the successor to the Server Message Block |
| @@ -169,11 +167,15 @@ config CIFS_NFSD_EXPORT | |||
| 169 | 167 | ||
| 170 | config CIFS_SMB2 | 168 | config CIFS_SMB2 |
| 171 | bool "SMB2 and SMB3 network file system support" | 169 | bool "SMB2 and SMB3 network file system support" |
| 172 | depends on CIFS && INET | 170 | depends on CIFS |
| 173 | select NLS | ||
| 174 | select KEYS | 171 | select KEYS |
| 175 | select FSCACHE | 172 | select FSCACHE |
| 176 | select DNS_RESOLVER | 173 | select DNS_RESOLVER |
| 174 | select CRYPTO_AES | ||
| 175 | select CRYPTO_SHA256 | ||
| 176 | select CRYPTO_CMAC | ||
| 177 | select CRYPTO_AEAD2 | ||
| 178 | select CRYPTO_CCM | ||
| 177 | 179 | ||
| 178 | help | 180 | help |
| 179 | This enables support for the Server Message Block version 2 | 181 | This enables support for the Server Message Block version 2 |
| @@ -194,7 +196,7 @@ config CIFS_SMB2 | |||
| 194 | 196 | ||
| 195 | config CIFS_SMB311 | 197 | config CIFS_SMB311 |
| 196 | bool "SMB3.1.1 network file system support (Experimental)" | 198 | bool "SMB3.1.1 network file system support (Experimental)" |
| 197 | depends on CIFS_SMB2 && INET | 199 | depends on CIFS_SMB2 |
| 198 | 200 | ||
| 199 | help | 201 | help |
| 200 | This enables experimental support for the newest, SMB3.1.1, dialect. | 202 | This enables experimental support for the newest, SMB3.1.1, dialect. |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 66bd7fa9b7a6..058ac9b36f04 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/random.h> | 34 | #include <linux/random.h> |
| 35 | #include <linux/highmem.h> | 35 | #include <linux/highmem.h> |
| 36 | #include <crypto/skcipher.h> | 36 | #include <crypto/skcipher.h> |
| 37 | #include <crypto/aead.h> | ||
| 37 | 38 | ||
| 38 | static int | 39 | static int |
| 39 | cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) | 40 | cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) |
| @@ -75,24 +76,20 @@ int __cifs_calc_signature(struct smb_rqst *rqst, | |||
| 75 | struct kvec *iov = rqst->rq_iov; | 76 | struct kvec *iov = rqst->rq_iov; |
| 76 | int n_vec = rqst->rq_nvec; | 77 | int n_vec = rqst->rq_nvec; |
| 77 | 78 | ||
| 78 | for (i = 0; i < n_vec; i++) { | 79 | if (n_vec < 2 || iov[0].iov_len != 4) |
| 80 | return -EIO; | ||
| 81 | |||
| 82 | for (i = 1; i < n_vec; i++) { | ||
| 79 | if (iov[i].iov_len == 0) | 83 | if (iov[i].iov_len == 0) |
| 80 | continue; | 84 | continue; |
| 81 | if (iov[i].iov_base == NULL) { | 85 | if (iov[i].iov_base == NULL) { |
| 82 | cifs_dbg(VFS, "null iovec entry\n"); | 86 | cifs_dbg(VFS, "null iovec entry\n"); |
| 83 | return -EIO; | 87 | return -EIO; |
| 84 | } | 88 | } |
| 85 | /* The first entry includes a length field (which does not get | 89 | if (i == 1 && iov[1].iov_len <= 4) |
| 86 | signed that occupies the first 4 bytes before the header */ | 90 | break; /* nothing to sign or corrupt header */ |
| 87 | if (i == 0) { | 91 | rc = crypto_shash_update(shash, |
| 88 | if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ | 92 | iov[i].iov_base, iov[i].iov_len); |
| 89 | break; /* nothing to sign or corrupt header */ | ||
| 90 | rc = crypto_shash_update(shash, | ||
| 91 | iov[i].iov_base + 4, iov[i].iov_len - 4); | ||
| 92 | } else { | ||
| 93 | rc = crypto_shash_update(shash, | ||
| 94 | iov[i].iov_base, iov[i].iov_len); | ||
| 95 | } | ||
| 96 | if (rc) { | 93 | if (rc) { |
| 97 | cifs_dbg(VFS, "%s: Could not update with payload\n", | 94 | cifs_dbg(VFS, "%s: Could not update with payload\n", |
| 98 | __func__); | 95 | __func__); |
| @@ -168,6 +165,10 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server, | |||
| 168 | char smb_signature[20]; | 165 | char smb_signature[20]; |
| 169 | struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base; | 166 | struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
| 170 | 167 | ||
| 168 | if (rqst->rq_iov[0].iov_len != 4 || | ||
| 169 | rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) | ||
| 170 | return -EIO; | ||
| 171 | |||
| 171 | if ((cifs_pdu == NULL) || (server == NULL)) | 172 | if ((cifs_pdu == NULL) || (server == NULL)) |
| 172 | return -EINVAL; | 173 | return -EINVAL; |
| 173 | 174 | ||
| @@ -209,12 +210,14 @@ int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, | |||
| 209 | int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, | 210 | int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, |
| 210 | __u32 *pexpected_response_sequence_number) | 211 | __u32 *pexpected_response_sequence_number) |
| 211 | { | 212 | { |
| 212 | struct kvec iov; | 213 | struct kvec iov[2]; |
| 213 | 214 | ||
| 214 | iov.iov_base = cifs_pdu; | 215 | iov[0].iov_base = cifs_pdu; |
| 215 | iov.iov_len = be32_to_cpu(cifs_pdu->smb_buf_length) + 4; | 216 | iov[0].iov_len = 4; |
| 217 | iov[1].iov_base = (char *)cifs_pdu + 4; | ||
| 218 | iov[1].iov_len = be32_to_cpu(cifs_pdu->smb_buf_length); | ||
| 216 | 219 | ||
| 217 | return cifs_sign_smbv(&iov, 1, server, | 220 | return cifs_sign_smbv(iov, 2, server, |
| 218 | pexpected_response_sequence_number); | 221 | pexpected_response_sequence_number); |
| 219 | } | 222 | } |
| 220 | 223 | ||
| @@ -227,6 +230,10 @@ int cifs_verify_signature(struct smb_rqst *rqst, | |||
| 227 | char what_we_think_sig_should_be[20]; | 230 | char what_we_think_sig_should_be[20]; |
| 228 | struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base; | 231 | struct smb_hdr *cifs_pdu = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
| 229 | 232 | ||
| 233 | if (rqst->rq_iov[0].iov_len != 4 || | ||
| 234 | rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) | ||
| 235 | return -EIO; | ||
| 236 | |||
| 230 | if (cifs_pdu == NULL || server == NULL) | 237 | if (cifs_pdu == NULL || server == NULL) |
| 231 | return -EINVAL; | 238 | return -EINVAL; |
| 232 | 239 | ||
| @@ -868,7 +875,7 @@ out: | |||
| 868 | } | 875 | } |
| 869 | 876 | ||
| 870 | void | 877 | void |
| 871 | cifs_crypto_shash_release(struct TCP_Server_Info *server) | 878 | cifs_crypto_secmech_release(struct TCP_Server_Info *server) |
| 872 | { | 879 | { |
| 873 | if (server->secmech.cmacaes) { | 880 | if (server->secmech.cmacaes) { |
| 874 | crypto_free_shash(server->secmech.cmacaes); | 881 | crypto_free_shash(server->secmech.cmacaes); |
| @@ -890,6 +897,16 @@ cifs_crypto_shash_release(struct TCP_Server_Info *server) | |||
| 890 | server->secmech.hmacmd5 = NULL; | 897 | server->secmech.hmacmd5 = NULL; |
| 891 | } | 898 | } |
| 892 | 899 | ||
| 900 | if (server->secmech.ccmaesencrypt) { | ||
| 901 | crypto_free_aead(server->secmech.ccmaesencrypt); | ||
| 902 | server->secmech.ccmaesencrypt = NULL; | ||
| 903 | } | ||
| 904 | |||
| 905 | if (server->secmech.ccmaesdecrypt) { | ||
| 906 | crypto_free_aead(server->secmech.ccmaesdecrypt); | ||
| 907 | server->secmech.ccmaesdecrypt = NULL; | ||
| 908 | } | ||
| 909 | |||
| 893 | kfree(server->secmech.sdesccmacaes); | 910 | kfree(server->secmech.sdesccmacaes); |
| 894 | server->secmech.sdesccmacaes = NULL; | 911 | server->secmech.sdesccmacaes = NULL; |
| 895 | kfree(server->secmech.sdeschmacsha256); | 912 | kfree(server->secmech.sdeschmacsha256); |
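The cifsencrypt.c hunks above switch the signing code to a fixed two-vector request layout: iov[0] carries only the 4-byte RFC1002 length field, which is not covered by the signature, and iov[1] starts at the SMB header immediately after it, so cifs_sign_rqst() and cifs_verify_signature() can reject any request where iov[0].iov_len != 4 or where iov[1] does not follow iov[0] contiguously. A small userspace sketch of building and validating that layout (illustrative only, not the CIFS code):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <sys/uio.h>

/* Check the "4-byte length header in iov[0], body right after it" layout. */
static bool rqst_layout_ok(const struct iovec *iov, int n_vec)
{
        return n_vec >= 2 &&
               iov[0].iov_len == 4 &&
               (char *)iov[0].iov_base + 4 == (char *)iov[1].iov_base;
}

int main(void)
{
        unsigned char pdu[64];          /* 4-byte length + SMB header/body */
        struct iovec iov[2];

        memset(pdu, 0, sizeof(pdu));
        iov[0].iov_base = pdu;
        iov[0].iov_len = 4;
        iov[1].iov_base = pdu + 4;
        iov[1].iov_len = sizeof(pdu) - 4;

        printf("layout %s\n", rqst_layout_ok(iov, 2) ? "ok" : "bad");
        return 0;
}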
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 70f4e65fced2..15e1db8738ae 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -1365,5 +1365,19 @@ MODULE_DESCRIPTION | |||
| 1365 | ("VFS to access servers complying with the SNIA CIFS Specification " | 1365 | ("VFS to access servers complying with the SNIA CIFS Specification " |
| 1366 | "e.g. Samba and Windows"); | 1366 | "e.g. Samba and Windows"); |
| 1367 | MODULE_VERSION(CIFS_VERSION); | 1367 | MODULE_VERSION(CIFS_VERSION); |
| 1368 | MODULE_SOFTDEP("pre: arc4"); | ||
| 1369 | MODULE_SOFTDEP("pre: des"); | ||
| 1370 | MODULE_SOFTDEP("pre: ecb"); | ||
| 1371 | MODULE_SOFTDEP("pre: hmac"); | ||
| 1372 | MODULE_SOFTDEP("pre: md4"); | ||
| 1373 | MODULE_SOFTDEP("pre: md5"); | ||
| 1374 | MODULE_SOFTDEP("pre: nls"); | ||
| 1375 | #ifdef CONFIG_CIFS_SMB2 | ||
| 1376 | MODULE_SOFTDEP("pre: aes"); | ||
| 1377 | MODULE_SOFTDEP("pre: cmac"); | ||
| 1378 | MODULE_SOFTDEP("pre: sha256"); | ||
| 1379 | MODULE_SOFTDEP("pre: aead2"); | ||
| 1380 | MODULE_SOFTDEP("pre: ccm"); | ||
| 1381 | #endif /* CONFIG_CIFS_SMB2 */ | ||
| 1368 | module_init(init_cifs) | 1382 | module_init(init_cifs) |
| 1369 | module_exit(exit_cifs) | 1383 | module_exit(exit_cifs) |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 7ea8a3393936..1a90bb3e2986 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -136,6 +136,8 @@ struct cifs_secmech { | |||
| 136 | struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */ | 136 | struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */ |
| 137 | struct sdesc *sdeschmacsha256; /* ctxt to generate smb2 signature */ | 137 | struct sdesc *sdeschmacsha256; /* ctxt to generate smb2 signature */ |
| 138 | struct sdesc *sdesccmacaes; /* ctxt to generate smb3 signature */ | 138 | struct sdesc *sdesccmacaes; /* ctxt to generate smb3 signature */ |
| 139 | struct crypto_aead *ccmaesencrypt; /* smb3 encryption aead */ | ||
| 140 | struct crypto_aead *ccmaesdecrypt; /* smb3 decryption aead */ | ||
| 139 | }; | 141 | }; |
| 140 | 142 | ||
| 141 | /* per smb session structure/fields */ | 143 | /* per smb session structure/fields */ |
| @@ -208,7 +210,7 @@ struct cifsInodeInfo; | |||
| 208 | struct cifs_open_parms; | 210 | struct cifs_open_parms; |
| 209 | 211 | ||
| 210 | struct smb_version_operations { | 212 | struct smb_version_operations { |
| 211 | int (*send_cancel)(struct TCP_Server_Info *, void *, | 213 | int (*send_cancel)(struct TCP_Server_Info *, struct smb_rqst *, |
| 212 | struct mid_q_entry *); | 214 | struct mid_q_entry *); |
| 213 | bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *); | 215 | bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *); |
| 214 | /* setup request: allocate mid, sign message */ | 216 | /* setup request: allocate mid, sign message */ |
| @@ -433,6 +435,14 @@ struct smb_version_operations { | |||
| 433 | bool (*dir_needs_close)(struct cifsFileInfo *); | 435 | bool (*dir_needs_close)(struct cifsFileInfo *); |
| 434 | long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t, | 436 | long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t, |
| 435 | loff_t); | 437 | loff_t); |
| 438 | /* init transform request - used for encryption for now */ | ||
| 439 | int (*init_transform_rq)(struct TCP_Server_Info *, struct smb_rqst *, | ||
| 440 | struct smb_rqst *); | ||
| 441 | /* free transform request */ | ||
| 442 | void (*free_transform_rq)(struct smb_rqst *); | ||
| 443 | int (*is_transform_hdr)(void *buf); | ||
| 444 | int (*receive_transform)(struct TCP_Server_Info *, | ||
| 445 | struct mid_q_entry **); | ||
| 436 | }; | 446 | }; |
| 437 | 447 | ||
| 438 | struct smb_version_values { | 448 | struct smb_version_values { |
| @@ -1119,7 +1129,10 @@ struct cifs_readdata { | |||
| 1119 | int (*read_into_pages)(struct TCP_Server_Info *server, | 1129 | int (*read_into_pages)(struct TCP_Server_Info *server, |
| 1120 | struct cifs_readdata *rdata, | 1130 | struct cifs_readdata *rdata, |
| 1121 | unsigned int len); | 1131 | unsigned int len); |
| 1122 | struct kvec iov; | 1132 | int (*copy_into_pages)(struct TCP_Server_Info *server, |
| 1133 | struct cifs_readdata *rdata, | ||
| 1134 | struct iov_iter *iter); | ||
| 1135 | struct kvec iov[2]; | ||
| 1123 | unsigned int pagesz; | 1136 | unsigned int pagesz; |
| 1124 | unsigned int tailsz; | 1137 | unsigned int tailsz; |
| 1125 | unsigned int credits; | 1138 | unsigned int credits; |
| @@ -1302,6 +1315,13 @@ typedef int (mid_receive_t)(struct TCP_Server_Info *server, | |||
| 1302 | */ | 1315 | */ |
| 1303 | typedef void (mid_callback_t)(struct mid_q_entry *mid); | 1316 | typedef void (mid_callback_t)(struct mid_q_entry *mid); |
| 1304 | 1317 | ||
| 1318 | /* | ||
| 1319 | * This is the prototype for mid handle function. This is called once the mid | ||
| 1320 | * has been recognized after decryption of the message. | ||
| 1321 | */ | ||
| 1322 | typedef int (mid_handle_t)(struct TCP_Server_Info *server, | ||
| 1323 | struct mid_q_entry *mid); | ||
| 1324 | |||
| 1305 | /* one of these for every pending CIFS request to the server */ | 1325 | /* one of these for every pending CIFS request to the server */ |
| 1306 | struct mid_q_entry { | 1326 | struct mid_q_entry { |
| 1307 | struct list_head qhead; /* mids waiting on reply from this server */ | 1327 | struct list_head qhead; /* mids waiting on reply from this server */ |
| @@ -1316,6 +1336,7 @@ struct mid_q_entry { | |||
| 1316 | #endif | 1336 | #endif |
| 1317 | mid_receive_t *receive; /* call receive callback */ | 1337 | mid_receive_t *receive; /* call receive callback */ |
| 1318 | mid_callback_t *callback; /* call completion callback */ | 1338 | mid_callback_t *callback; /* call completion callback */ |
| 1339 | mid_handle_t *handle; /* call handle mid callback */ | ||
| 1319 | void *callback_data; /* general purpose pointer for callback */ | 1340 | void *callback_data; /* general purpose pointer for callback */ |
| 1320 | void *resp_buf; /* pointer to received SMB header */ | 1341 | void *resp_buf; /* pointer to received SMB header */ |
| 1321 | int mid_state; /* wish this were enum but can not pass to wait_event */ | 1342 | int mid_state; /* wish this were enum but can not pass to wait_event */ |
| @@ -1323,6 +1344,7 @@ struct mid_q_entry { | |||
| 1323 | bool large_buf:1; /* if valid response, is pointer to large buf */ | 1344 | bool large_buf:1; /* if valid response, is pointer to large buf */ |
| 1324 | bool multiRsp:1; /* multiple trans2 responses for one request */ | 1345 | bool multiRsp:1; /* multiple trans2 responses for one request */ |
| 1325 | bool multiEnd:1; /* both received */ | 1346 | bool multiEnd:1; /* both received */ |
| 1347 | bool decrypted:1; /* decrypted entry */ | ||
| 1326 | }; | 1348 | }; |
| 1327 | 1349 | ||
| 1328 | /* Make code in transport.c a little cleaner by moving | 1350 | /* Make code in transport.c a little cleaner by moving |
| @@ -1475,7 +1497,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, | |||
| 1475 | #define CIFS_OBREAK_OP 0x0100 /* oplock break request */ | 1497 | #define CIFS_OBREAK_OP 0x0100 /* oplock break request */ |
| 1476 | #define CIFS_NEG_OP 0x0200 /* negotiate request */ | 1498 | #define CIFS_NEG_OP 0x0200 /* negotiate request */ |
| 1477 | #define CIFS_OP_MASK 0x0380 /* mask request type */ | 1499 | #define CIFS_OP_MASK 0x0380 /* mask request type */ |
| 1500 | |||
| 1478 | #define CIFS_HAS_CREDITS 0x0400 /* already has credits */ | 1501 | #define CIFS_HAS_CREDITS 0x0400 /* already has credits */ |
| 1502 | #define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */ | ||
| 1479 | 1503 | ||
| 1480 | /* Security Flags: indicate type of session setup needed */ | 1504 | /* Security Flags: indicate type of session setup needed */ |
| 1481 | #define CIFSSEC_MAY_SIGN 0x00001 | 1505 | #define CIFSSEC_MAY_SIGN 0x00001 |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index c7b3c841e660..406d2c10ba78 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
| @@ -75,10 +75,16 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, | |||
| 75 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); | 75 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); |
| 76 | extern void cifs_delete_mid(struct mid_q_entry *mid); | 76 | extern void cifs_delete_mid(struct mid_q_entry *mid); |
| 77 | extern void cifs_wake_up_task(struct mid_q_entry *mid); | 77 | extern void cifs_wake_up_task(struct mid_q_entry *mid); |
| 78 | extern int cifs_handle_standard(struct TCP_Server_Info *server, | ||
| 79 | struct mid_q_entry *mid); | ||
| 80 | extern int cifs_discard_remaining_data(struct TCP_Server_Info *server); | ||
| 78 | extern int cifs_call_async(struct TCP_Server_Info *server, | 81 | extern int cifs_call_async(struct TCP_Server_Info *server, |
| 79 | struct smb_rqst *rqst, | 82 | struct smb_rqst *rqst, |
| 80 | mid_receive_t *receive, mid_callback_t *callback, | 83 | mid_receive_t *receive, mid_callback_t *callback, |
| 81 | void *cbdata, const int flags); | 84 | mid_handle_t *handle, void *cbdata, const int flags); |
| 85 | extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, | ||
| 86 | struct smb_rqst *rqst, int *resp_buf_type, | ||
| 87 | const int flags, struct kvec *resp_iov); | ||
| 82 | extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *, | 88 | extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *, |
| 83 | struct smb_hdr * /* input */ , | 89 | struct smb_hdr * /* input */ , |
| 84 | struct smb_hdr * /* out */ , | 90 | struct smb_hdr * /* out */ , |
| @@ -96,7 +102,8 @@ extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server, | |||
| 96 | unsigned int *credits); | 102 | unsigned int *credits); |
| 97 | extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, | 103 | extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, |
| 98 | struct kvec *, int /* nvec to send */, | 104 | struct kvec *, int /* nvec to send */, |
| 99 | int * /* type of buf returned */ , const int flags); | 105 | int * /* type of buf returned */, const int flags, |
| 106 | struct kvec * /* resp vec */); | ||
| 100 | extern int SendReceiveBlockingLock(const unsigned int xid, | 107 | extern int SendReceiveBlockingLock(const unsigned int xid, |
| 101 | struct cifs_tcon *ptcon, | 108 | struct cifs_tcon *ptcon, |
| 102 | struct smb_hdr *in_buf , | 109 | struct smb_hdr *in_buf , |
| @@ -441,7 +448,7 @@ extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *, | |||
| 441 | const struct nls_table *); | 448 | const struct nls_table *); |
| 442 | extern int setup_ntlm_response(struct cifs_ses *, const struct nls_table *); | 449 | extern int setup_ntlm_response(struct cifs_ses *, const struct nls_table *); |
| 443 | extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *); | 450 | extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *); |
| 444 | extern void cifs_crypto_shash_release(struct TCP_Server_Info *); | 451 | extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server); |
| 445 | extern int calc_seckey(struct cifs_ses *); | 452 | extern int calc_seckey(struct cifs_ses *); |
| 446 | extern int generate_smb30signingkey(struct cifs_ses *); | 453 | extern int generate_smb30signingkey(struct cifs_ses *); |
| 447 | extern int generate_smb311signingkey(struct cifs_ses *); | 454 | extern int generate_smb311signingkey(struct cifs_ses *); |
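Note (not part of the diff): taken together, the prototype changes above define a new calling convention for the synchronous path. SendReceive2() gains a caller-supplied response kvec instead of overwriting iov[0], and it no longer frees the small request buffer; cifs_call_async() grows a mid_handle_t argument. A minimal sketch of the pattern the cifssmb.c hunks below now follow; parse_response() is a hypothetical placeholder for whatever the caller does with the reply:

    struct kvec iov[1], rsp_iov;
    int resp_buf_type = CIFS_NO_BUFFER;

    iov[0].iov_base = (char *)pSMB;
    iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;

    rc = SendReceive2(xid, tcon->ses, iov, 1, &resp_buf_type, 0, &rsp_iov);
    cifs_small_buf_release(pSMB);   /* request buffer is now freed by the caller */
    if (rc == 0)
            parse_response(rsp_iov.iov_base, rsp_iov.iov_len); /* hypothetical */
    free_rsp_buf(resp_buf_type, rsp_iov.iov_base);  /* response freed when done */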
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b47261858e6d..f5099fb8a22f 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -673,6 +673,7 @@ CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 673 | return rc; | 673 | return rc; |
| 674 | 674 | ||
| 675 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0); | 675 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0); |
| 676 | cifs_small_buf_release(smb_buffer); | ||
| 676 | if (rc) | 677 | if (rc) |
| 677 | cifs_dbg(FYI, "Tree disconnect failed %d\n", rc); | 678 | cifs_dbg(FYI, "Tree disconnect failed %d\n", rc); |
| 678 | 679 | ||
| @@ -707,9 +708,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
| 707 | { | 708 | { |
| 708 | ECHO_REQ *smb; | 709 | ECHO_REQ *smb; |
| 709 | int rc = 0; | 710 | int rc = 0; |
| 710 | struct kvec iov; | 711 | struct kvec iov[2]; |
| 711 | struct smb_rqst rqst = { .rq_iov = &iov, | 712 | struct smb_rqst rqst = { .rq_iov = iov, |
| 712 | .rq_nvec = 1 }; | 713 | .rq_nvec = 2 }; |
| 713 | 714 | ||
| 714 | cifs_dbg(FYI, "In echo request\n"); | 715 | cifs_dbg(FYI, "In echo request\n"); |
| 715 | 716 | ||
| @@ -724,10 +725,13 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
| 724 | put_bcc(1, &smb->hdr); | 725 | put_bcc(1, &smb->hdr); |
| 725 | smb->Data[0] = 'a'; | 726 | smb->Data[0] = 'a'; |
| 726 | inc_rfc1001_len(smb, 3); | 727 | inc_rfc1001_len(smb, 3); |
| 727 | iov.iov_base = smb; | ||
| 728 | iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; | ||
| 729 | 728 | ||
| 730 | rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, | 729 | iov[0].iov_len = 4; |
| 730 | iov[0].iov_base = smb; | ||
| 731 | iov[1].iov_len = get_rfc1002_length(smb); | ||
| 732 | iov[1].iov_base = (char *)smb + 4; | ||
| 733 | |||
| 734 | rc = cifs_call_async(server, &rqst, NULL, cifs_echo_callback, NULL, | ||
| 731 | server, CIFS_ASYNC_OP | CIFS_ECHO_OP); | 735 | server, CIFS_ASYNC_OP | CIFS_ECHO_OP); |
| 732 | if (rc) | 736 | if (rc) |
| 733 | cifs_dbg(FYI, "Echo request failed: %d\n", rc); | 737 | cifs_dbg(FYI, "Echo request failed: %d\n", rc); |
| @@ -772,6 +776,7 @@ CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses) | |||
| 772 | 776 | ||
| 773 | pSMB->AndXCommand = 0xFF; | 777 | pSMB->AndXCommand = 0xFF; |
| 774 | rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0); | 778 | rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0); |
| 779 | cifs_small_buf_release(pSMB); | ||
| 775 | session_already_dead: | 780 | session_already_dead: |
| 776 | mutex_unlock(&ses->session_mutex); | 781 | mutex_unlock(&ses->session_mutex); |
| 777 | 782 | ||
| @@ -1394,8 +1399,8 @@ openRetry: | |||
| 1394 | * Discard any remaining data in the current SMB. To do this, we borrow the | 1399 | * Discard any remaining data in the current SMB. To do this, we borrow the |
| 1395 | * current bigbuf. | 1400 | * current bigbuf. |
| 1396 | */ | 1401 | */ |
| 1397 | static int | 1402 | int |
| 1398 | discard_remaining_data(struct TCP_Server_Info *server) | 1403 | cifs_discard_remaining_data(struct TCP_Server_Info *server) |
| 1399 | { | 1404 | { |
| 1400 | unsigned int rfclen = get_rfc1002_length(server->smallbuf); | 1405 | unsigned int rfclen = get_rfc1002_length(server->smallbuf); |
| 1401 | int remaining = rfclen + 4 - server->total_read; | 1406 | int remaining = rfclen + 4 - server->total_read; |
| @@ -1421,7 +1426,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
| 1421 | int length; | 1426 | int length; |
| 1422 | struct cifs_readdata *rdata = mid->callback_data; | 1427 | struct cifs_readdata *rdata = mid->callback_data; |
| 1423 | 1428 | ||
| 1424 | length = discard_remaining_data(server); | 1429 | length = cifs_discard_remaining_data(server); |
| 1425 | dequeue_mid(mid, rdata->result); | 1430 | dequeue_mid(mid, rdata->result); |
| 1426 | return length; | 1431 | return length; |
| 1427 | } | 1432 | } |
| @@ -1454,7 +1459,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
| 1454 | 1459 | ||
| 1455 | if (server->ops->is_status_pending && | 1460 | if (server->ops->is_status_pending && |
| 1456 | server->ops->is_status_pending(buf, server, 0)) { | 1461 | server->ops->is_status_pending(buf, server, 0)) { |
| 1457 | discard_remaining_data(server); | 1462 | cifs_discard_remaining_data(server); |
| 1458 | return -1; | 1463 | return -1; |
| 1459 | } | 1464 | } |
| 1460 | 1465 | ||
| @@ -1507,10 +1512,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
| 1507 | } | 1512 | } |
| 1508 | 1513 | ||
| 1509 | /* set up first iov for signature check */ | 1514 | /* set up first iov for signature check */ |
| 1510 | rdata->iov.iov_base = buf; | 1515 | rdata->iov[0].iov_base = buf; |
| 1511 | rdata->iov.iov_len = server->total_read; | 1516 | rdata->iov[0].iov_len = 4; |
| 1512 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | 1517 | rdata->iov[1].iov_base = buf + 4; |
| 1513 | rdata->iov.iov_base, rdata->iov.iov_len); | 1518 | rdata->iov[1].iov_len = server->total_read - 4; |
| 1519 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n", | ||
| 1520 | rdata->iov[0].iov_base, server->total_read); | ||
| 1514 | 1521 | ||
| 1515 | /* how much data is in the response? */ | 1522 | /* how much data is in the response? */ |
| 1516 | data_len = server->ops->read_data_length(buf); | 1523 | data_len = server->ops->read_data_length(buf); |
| @@ -1543,8 +1550,8 @@ cifs_readv_callback(struct mid_q_entry *mid) | |||
| 1543 | struct cifs_readdata *rdata = mid->callback_data; | 1550 | struct cifs_readdata *rdata = mid->callback_data; |
| 1544 | struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); | 1551 | struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); |
| 1545 | struct TCP_Server_Info *server = tcon->ses->server; | 1552 | struct TCP_Server_Info *server = tcon->ses->server; |
| 1546 | struct smb_rqst rqst = { .rq_iov = &rdata->iov, | 1553 | struct smb_rqst rqst = { .rq_iov = rdata->iov, |
| 1547 | .rq_nvec = 1, | 1554 | .rq_nvec = 2, |
| 1548 | .rq_pages = rdata->pages, | 1555 | .rq_pages = rdata->pages, |
| 1549 | .rq_npages = rdata->nr_pages, | 1556 | .rq_npages = rdata->nr_pages, |
| 1550 | .rq_pagesz = rdata->pagesz, | 1557 | .rq_pagesz = rdata->pagesz, |
| @@ -1599,8 +1606,8 @@ cifs_async_readv(struct cifs_readdata *rdata) | |||
| 1599 | READ_REQ *smb = NULL; | 1606 | READ_REQ *smb = NULL; |
| 1600 | int wct; | 1607 | int wct; |
| 1601 | struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); | 1608 | struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); |
| 1602 | struct smb_rqst rqst = { .rq_iov = &rdata->iov, | 1609 | struct smb_rqst rqst = { .rq_iov = rdata->iov, |
| 1603 | .rq_nvec = 1 }; | 1610 | .rq_nvec = 2 }; |
| 1604 | 1611 | ||
| 1605 | cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", | 1612 | cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", |
| 1606 | __func__, rdata->offset, rdata->bytes); | 1613 | __func__, rdata->offset, rdata->bytes); |
| @@ -1640,12 +1647,14 @@ cifs_async_readv(struct cifs_readdata *rdata) | |||
| 1640 | } | 1647 | } |
| 1641 | 1648 | ||
| 1642 | /* 4 for RFC1001 length + 1 for BCC */ | 1649 | /* 4 for RFC1001 length + 1 for BCC */ |
| 1643 | rdata->iov.iov_base = smb; | 1650 | rdata->iov[0].iov_base = smb; |
| 1644 | rdata->iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; | 1651 | rdata->iov[0].iov_len = 4; |
| 1652 | rdata->iov[1].iov_base = (char *)smb + 4; | ||
| 1653 | rdata->iov[1].iov_len = get_rfc1002_length(smb); | ||
| 1645 | 1654 | ||
| 1646 | kref_get(&rdata->refcount); | 1655 | kref_get(&rdata->refcount); |
| 1647 | rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive, | 1656 | rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive, |
| 1648 | cifs_readv_callback, rdata, 0); | 1657 | cifs_readv_callback, NULL, rdata, 0); |
| 1649 | 1658 | ||
| 1650 | if (rc == 0) | 1659 | if (rc == 0) |
| 1651 | cifs_stats_inc(&tcon->stats.cifs_stats.num_reads); | 1660 | cifs_stats_inc(&tcon->stats.cifs_stats.num_reads); |
| @@ -1667,6 +1676,7 @@ CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 1667 | int wct; | 1676 | int wct; |
| 1668 | int resp_buf_type = 0; | 1677 | int resp_buf_type = 0; |
| 1669 | struct kvec iov[1]; | 1678 | struct kvec iov[1]; |
| 1679 | struct kvec rsp_iov; | ||
| 1670 | __u32 pid = io_parms->pid; | 1680 | __u32 pid = io_parms->pid; |
| 1671 | __u16 netfid = io_parms->netfid; | 1681 | __u16 netfid = io_parms->netfid; |
| 1672 | __u64 offset = io_parms->offset; | 1682 | __u64 offset = io_parms->offset; |
| @@ -1716,10 +1726,11 @@ CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 1716 | 1726 | ||
| 1717 | iov[0].iov_base = (char *)pSMB; | 1727 | iov[0].iov_base = (char *)pSMB; |
| 1718 | iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; | 1728 | iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; |
| 1719 | rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, | 1729 | rc = SendReceive2(xid, tcon->ses, iov, 1, &resp_buf_type, |
| 1720 | &resp_buf_type, CIFS_LOG_ERROR); | 1730 | CIFS_LOG_ERROR, &rsp_iov); |
| 1731 | cifs_small_buf_release(pSMB); | ||
| 1721 | cifs_stats_inc(&tcon->stats.cifs_stats.num_reads); | 1732 | cifs_stats_inc(&tcon->stats.cifs_stats.num_reads); |
| 1722 | pSMBr = (READ_RSP *)iov[0].iov_base; | 1733 | pSMBr = (READ_RSP *)rsp_iov.iov_base; |
| 1723 | if (rc) { | 1734 | if (rc) { |
| 1724 | cifs_dbg(VFS, "Send error in read = %d\n", rc); | 1735 | cifs_dbg(VFS, "Send error in read = %d\n", rc); |
| 1725 | } else { | 1736 | } else { |
| @@ -1747,12 +1758,11 @@ CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 1747 | } | 1758 | } |
| 1748 | } | 1759 | } |
| 1749 | 1760 | ||
| 1750 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | ||
| 1751 | if (*buf) { | 1761 | if (*buf) { |
| 1752 | free_rsp_buf(resp_buf_type, iov[0].iov_base); | 1762 | free_rsp_buf(resp_buf_type, rsp_iov.iov_base); |
| 1753 | } else if (resp_buf_type != CIFS_NO_BUFFER) { | 1763 | } else if (resp_buf_type != CIFS_NO_BUFFER) { |
| 1754 | /* return buffer to caller to free */ | 1764 | /* return buffer to caller to free */ |
| 1755 | *buf = iov[0].iov_base; | 1765 | *buf = rsp_iov.iov_base; |
| 1756 | if (resp_buf_type == CIFS_SMALL_BUFFER) | 1766 | if (resp_buf_type == CIFS_SMALL_BUFFER) |
| 1757 | *pbuf_type = CIFS_SMALL_BUFFER; | 1767 | *pbuf_type = CIFS_SMALL_BUFFER; |
| 1758 | else if (resp_buf_type == CIFS_LARGE_BUFFER) | 1768 | else if (resp_buf_type == CIFS_LARGE_BUFFER) |
| @@ -2093,7 +2103,7 @@ cifs_async_writev(struct cifs_writedata *wdata, | |||
| 2093 | WRITE_REQ *smb = NULL; | 2103 | WRITE_REQ *smb = NULL; |
| 2094 | int wct; | 2104 | int wct; |
| 2095 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | 2105 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); |
| 2096 | struct kvec iov; | 2106 | struct kvec iov[2]; |
| 2097 | struct smb_rqst rqst = { }; | 2107 | struct smb_rqst rqst = { }; |
| 2098 | 2108 | ||
| 2099 | if (tcon->ses->capabilities & CAP_LARGE_FILES) { | 2109 | if (tcon->ses->capabilities & CAP_LARGE_FILES) { |
| @@ -2126,11 +2136,13 @@ cifs_async_writev(struct cifs_writedata *wdata, | |||
| 2126 | cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); | 2136 | cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); |
| 2127 | 2137 | ||
| 2128 | /* 4 for RFC1001 length + 1 for BCC */ | 2138 | /* 4 for RFC1001 length + 1 for BCC */ |
| 2129 | iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1; | 2139 | iov[0].iov_len = 4; |
| 2130 | iov.iov_base = smb; | 2140 | iov[0].iov_base = smb; |
| 2141 | iov[1].iov_len = get_rfc1002_length(smb) + 1; | ||
| 2142 | iov[1].iov_base = (char *)smb + 4; | ||
| 2131 | 2143 | ||
| 2132 | rqst.rq_iov = &iov; | 2144 | rqst.rq_iov = iov; |
| 2133 | rqst.rq_nvec = 1; | 2145 | rqst.rq_nvec = 2; |
| 2134 | rqst.rq_pages = wdata->pages; | 2146 | rqst.rq_pages = wdata->pages; |
| 2135 | rqst.rq_npages = wdata->nr_pages; | 2147 | rqst.rq_npages = wdata->nr_pages; |
| 2136 | rqst.rq_pagesz = wdata->pagesz; | 2148 | rqst.rq_pagesz = wdata->pagesz; |
| @@ -2151,12 +2163,12 @@ cifs_async_writev(struct cifs_writedata *wdata, | |||
| 2151 | (struct smb_com_writex_req *)smb; | 2163 | (struct smb_com_writex_req *)smb; |
| 2152 | inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5); | 2164 | inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5); |
| 2153 | put_bcc(wdata->bytes + 5, &smbw->hdr); | 2165 | put_bcc(wdata->bytes + 5, &smbw->hdr); |
| 2154 | iov.iov_len += 4; /* pad bigger by four bytes */ | 2166 | iov[1].iov_len += 4; /* pad bigger by four bytes */ |
| 2155 | } | 2167 | } |
| 2156 | 2168 | ||
| 2157 | kref_get(&wdata->refcount); | 2169 | kref_get(&wdata->refcount); |
| 2158 | rc = cifs_call_async(tcon->ses->server, &rqst, NULL, | 2170 | rc = cifs_call_async(tcon->ses->server, &rqst, NULL, |
| 2159 | cifs_writev_callback, wdata, 0); | 2171 | cifs_writev_callback, NULL, wdata, 0); |
| 2160 | 2172 | ||
| 2161 | if (rc == 0) | 2173 | if (rc == 0) |
| 2162 | cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); | 2174 | cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); |
| @@ -2182,6 +2194,7 @@ CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2182 | __u64 offset = io_parms->offset; | 2194 | __u64 offset = io_parms->offset; |
| 2183 | struct cifs_tcon *tcon = io_parms->tcon; | 2195 | struct cifs_tcon *tcon = io_parms->tcon; |
| 2184 | unsigned int count = io_parms->length; | 2196 | unsigned int count = io_parms->length; |
| 2197 | struct kvec rsp_iov; | ||
| 2185 | 2198 | ||
| 2186 | *nbytes = 0; | 2199 | *nbytes = 0; |
| 2187 | 2200 | ||
| @@ -2240,8 +2253,9 @@ CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2240 | else /* wct == 12 pad bigger by four bytes */ | 2253 | else /* wct == 12 pad bigger by four bytes */ |
| 2241 | iov[0].iov_len = smb_hdr_len + 8; | 2254 | iov[0].iov_len = smb_hdr_len + 8; |
| 2242 | 2255 | ||
| 2243 | 2256 | rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, 0, | |
| 2244 | rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type, 0); | 2257 | &rsp_iov); |
| 2258 | cifs_small_buf_release(pSMB); | ||
| 2245 | cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); | 2259 | cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); |
| 2246 | if (rc) { | 2260 | if (rc) { |
| 2247 | cifs_dbg(FYI, "Send error Write2 = %d\n", rc); | 2261 | cifs_dbg(FYI, "Send error Write2 = %d\n", rc); |
| @@ -2249,7 +2263,7 @@ CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2249 | /* presumably this can not happen, but best to be safe */ | 2263 | /* presumably this can not happen, but best to be safe */ |
| 2250 | rc = -EIO; | 2264 | rc = -EIO; |
| 2251 | } else { | 2265 | } else { |
| 2252 | WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base; | 2266 | WRITE_RSP *pSMBr = (WRITE_RSP *)rsp_iov.iov_base; |
| 2253 | *nbytes = le16_to_cpu(pSMBr->CountHigh); | 2267 | *nbytes = le16_to_cpu(pSMBr->CountHigh); |
| 2254 | *nbytes = (*nbytes) << 16; | 2268 | *nbytes = (*nbytes) << 16; |
| 2255 | *nbytes += le16_to_cpu(pSMBr->Count); | 2269 | *nbytes += le16_to_cpu(pSMBr->Count); |
| @@ -2263,8 +2277,7 @@ CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2263 | *nbytes &= 0xFFFF; | 2277 | *nbytes &= 0xFFFF; |
| 2264 | } | 2278 | } |
| 2265 | 2279 | ||
| 2266 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | 2280 | free_rsp_buf(resp_buf_type, rsp_iov.iov_base); |
| 2267 | free_rsp_buf(resp_buf_type, iov[0].iov_base); | ||
| 2268 | 2281 | ||
| 2269 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 2282 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
| 2270 | since file handle passed in no longer valid */ | 2283 | since file handle passed in no longer valid */ |
| @@ -2279,6 +2292,7 @@ int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2279 | int rc = 0; | 2292 | int rc = 0; |
| 2280 | LOCK_REQ *pSMB = NULL; | 2293 | LOCK_REQ *pSMB = NULL; |
| 2281 | struct kvec iov[2]; | 2294 | struct kvec iov[2]; |
| 2295 | struct kvec rsp_iov; | ||
| 2282 | int resp_buf_type; | 2296 | int resp_buf_type; |
| 2283 | __u16 count; | 2297 | __u16 count; |
| 2284 | 2298 | ||
| @@ -2307,7 +2321,9 @@ int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2307 | iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); | 2321 | iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE); |
| 2308 | 2322 | ||
| 2309 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); | 2323 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); |
| 2310 | rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP); | 2324 | rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP, |
| 2325 | &rsp_iov); | ||
| 2326 | cifs_small_buf_release(pSMB); | ||
| 2311 | if (rc) | 2327 | if (rc) |
| 2312 | cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc); | 2328 | cifs_dbg(FYI, "Send error in cifs_lockv = %d\n", rc); |
| 2313 | 2329 | ||
| @@ -2368,14 +2384,12 @@ CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2368 | inc_rfc1001_len(pSMB, count); | 2384 | inc_rfc1001_len(pSMB, count); |
| 2369 | pSMB->ByteCount = cpu_to_le16(count); | 2385 | pSMB->ByteCount = cpu_to_le16(count); |
| 2370 | 2386 | ||
| 2371 | if (waitFlag) { | 2387 | if (waitFlag) |
| 2372 | rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, | 2388 | rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, |
| 2373 | (struct smb_hdr *) pSMB, &bytes_returned); | 2389 | (struct smb_hdr *) pSMB, &bytes_returned); |
| 2374 | cifs_small_buf_release(pSMB); | 2390 | else |
| 2375 | } else { | ||
| 2376 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags); | 2391 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags); |
| 2377 | /* SMB buffer freed by function above */ | 2392 | cifs_small_buf_release(pSMB); |
| 2378 | } | ||
| 2379 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); | 2393 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); |
| 2380 | if (rc) | 2394 | if (rc) |
| 2381 | cifs_dbg(FYI, "Send error in Lock = %d\n", rc); | 2395 | cifs_dbg(FYI, "Send error in Lock = %d\n", rc); |
| @@ -2401,6 +2415,7 @@ CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2401 | int resp_buf_type = 0; | 2415 | int resp_buf_type = 0; |
| 2402 | __u16 params, param_offset, offset, byte_count, count; | 2416 | __u16 params, param_offset, offset, byte_count, count; |
| 2403 | struct kvec iov[1]; | 2417 | struct kvec iov[1]; |
| 2418 | struct kvec rsp_iov; | ||
| 2404 | 2419 | ||
| 2405 | cifs_dbg(FYI, "Posix Lock\n"); | 2420 | cifs_dbg(FYI, "Posix Lock\n"); |
| 2406 | 2421 | ||
| @@ -2462,11 +2477,10 @@ CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2462 | iov[0].iov_base = (char *)pSMB; | 2477 | iov[0].iov_base = (char *)pSMB; |
| 2463 | iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; | 2478 | iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; |
| 2464 | rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, | 2479 | rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, |
| 2465 | &resp_buf_type, timeout); | 2480 | &resp_buf_type, timeout, &rsp_iov); |
| 2466 | pSMB = NULL; /* request buf already freed by SendReceive2. Do | 2481 | pSMBr = (struct smb_com_transaction2_sfi_rsp *)rsp_iov.iov_base; |
| 2467 | not try to free it twice below on exit */ | ||
| 2468 | pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base; | ||
| 2469 | } | 2482 | } |
| 2483 | cifs_small_buf_release(pSMB); | ||
| 2470 | 2484 | ||
| 2471 | if (rc) { | 2485 | if (rc) { |
| 2472 | cifs_dbg(FYI, "Send error in Posix Lock = %d\n", rc); | 2486 | cifs_dbg(FYI, "Send error in Posix Lock = %d\n", rc); |
| @@ -2506,10 +2520,7 @@ CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2506 | } | 2520 | } |
| 2507 | 2521 | ||
| 2508 | plk_err_exit: | 2522 | plk_err_exit: |
| 2509 | if (pSMB) | 2523 | free_rsp_buf(resp_buf_type, rsp_iov.iov_base); |
| 2510 | cifs_small_buf_release(pSMB); | ||
| 2511 | |||
| 2512 | free_rsp_buf(resp_buf_type, iov[0].iov_base); | ||
| 2513 | 2524 | ||
| 2514 | /* Note: On -EAGAIN error only caller can retry on handle based calls | 2525 | /* Note: On -EAGAIN error only caller can retry on handle based calls |
| 2515 | since file handle passed in no longer valid */ | 2526 | since file handle passed in no longer valid */ |
| @@ -2536,6 +2547,7 @@ CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id) | |||
| 2536 | pSMB->LastWriteTime = 0xFFFFFFFF; | 2547 | pSMB->LastWriteTime = 0xFFFFFFFF; |
| 2537 | pSMB->ByteCount = 0; | 2548 | pSMB->ByteCount = 0; |
| 2538 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 2549 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 2550 | cifs_small_buf_release(pSMB); | ||
| 2539 | cifs_stats_inc(&tcon->stats.cifs_stats.num_closes); | 2551 | cifs_stats_inc(&tcon->stats.cifs_stats.num_closes); |
| 2540 | if (rc) { | 2552 | if (rc) { |
| 2541 | if (rc != -EINTR) { | 2553 | if (rc != -EINTR) { |
| @@ -2565,6 +2577,7 @@ CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id) | |||
| 2565 | pSMB->FileID = (__u16) smb_file_id; | 2577 | pSMB->FileID = (__u16) smb_file_id; |
| 2566 | pSMB->ByteCount = 0; | 2578 | pSMB->ByteCount = 0; |
| 2567 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 2579 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 2580 | cifs_small_buf_release(pSMB); | ||
| 2568 | cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes); | 2581 | cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes); |
| 2569 | if (rc) | 2582 | if (rc) |
| 2570 | cifs_dbg(VFS, "Send error in Flush = %d\n", rc); | 2583 | cifs_dbg(VFS, "Send error in Flush = %d\n", rc); |
| @@ -3820,6 +3833,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, | |||
| 3820 | int buf_type = 0; | 3833 | int buf_type = 0; |
| 3821 | QUERY_SEC_DESC_REQ *pSMB; | 3834 | QUERY_SEC_DESC_REQ *pSMB; |
| 3822 | struct kvec iov[1]; | 3835 | struct kvec iov[1]; |
| 3836 | struct kvec rsp_iov; | ||
| 3823 | 3837 | ||
| 3824 | cifs_dbg(FYI, "GetCifsACL\n"); | 3838 | cifs_dbg(FYI, "GetCifsACL\n"); |
| 3825 | 3839 | ||
| @@ -3843,7 +3857,8 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, | |||
| 3843 | iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; | 3857 | iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4; |
| 3844 | 3858 | ||
| 3845 | rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, | 3859 | rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, |
| 3846 | 0); | 3860 | 0, &rsp_iov); |
| 3861 | cifs_small_buf_release(pSMB); | ||
| 3847 | cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get); | 3862 | cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get); |
| 3848 | if (rc) { | 3863 | if (rc) { |
| 3849 | cifs_dbg(FYI, "Send error in QuerySecDesc = %d\n", rc); | 3864 | cifs_dbg(FYI, "Send error in QuerySecDesc = %d\n", rc); |
| @@ -3855,11 +3870,11 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, | |||
| 3855 | char *pdata; | 3870 | char *pdata; |
| 3856 | 3871 | ||
| 3857 | /* validate_nttransact */ | 3872 | /* validate_nttransact */ |
| 3858 | rc = validate_ntransact(iov[0].iov_base, (char **)&parm, | 3873 | rc = validate_ntransact(rsp_iov.iov_base, (char **)&parm, |
| 3859 | &pdata, &parm_len, pbuflen); | 3874 | &pdata, &parm_len, pbuflen); |
| 3860 | if (rc) | 3875 | if (rc) |
| 3861 | goto qsec_out; | 3876 | goto qsec_out; |
| 3862 | pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base; | 3877 | pSMBr = (struct smb_com_ntransact_rsp *)rsp_iov.iov_base; |
| 3863 | 3878 | ||
| 3864 | cifs_dbg(FYI, "smb %p parm %p data %p\n", | 3879 | cifs_dbg(FYI, "smb %p parm %p data %p\n", |
| 3865 | pSMBr, parm, *acl_inf); | 3880 | pSMBr, parm, *acl_inf); |
| @@ -3896,8 +3911,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, | |||
| 3896 | } | 3911 | } |
| 3897 | } | 3912 | } |
| 3898 | qsec_out: | 3913 | qsec_out: |
| 3899 | free_rsp_buf(buf_type, iov[0].iov_base); | 3914 | free_rsp_buf(buf_type, rsp_iov.iov_base); |
| 3900 | /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ | ||
| 3901 | return rc; | 3915 | return rc; |
| 3902 | } | 3916 | } |
| 3903 | 3917 | ||
| @@ -4666,6 +4680,7 @@ CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 4666 | pSMB->FileID = searchHandle; | 4680 | pSMB->FileID = searchHandle; |
| 4667 | pSMB->ByteCount = 0; | 4681 | pSMB->ByteCount = 0; |
| 4668 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 4682 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 4683 | cifs_small_buf_release(pSMB); | ||
| 4669 | if (rc) | 4684 | if (rc) |
| 4670 | cifs_dbg(VFS, "Send error in FindClose = %d\n", rc); | 4685 | cifs_dbg(VFS, "Send error in FindClose = %d\n", rc); |
| 4671 | 4686 | ||
| @@ -5687,6 +5702,7 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 5687 | inc_rfc1001_len(pSMB, byte_count); | 5702 | inc_rfc1001_len(pSMB, byte_count); |
| 5688 | pSMB->ByteCount = cpu_to_le16(byte_count); | 5703 | pSMB->ByteCount = cpu_to_le16(byte_count); |
| 5689 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 5704 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 5705 | cifs_small_buf_release(pSMB); | ||
| 5690 | if (rc) { | 5706 | if (rc) { |
| 5691 | cifs_dbg(FYI, "Send error in SetFileInfo (SetFileSize) = %d\n", | 5707 | cifs_dbg(FYI, "Send error in SetFileInfo (SetFileSize) = %d\n", |
| 5692 | rc); | 5708 | rc); |
| @@ -5758,6 +5774,7 @@ CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 5758 | pSMB->ByteCount = cpu_to_le16(byte_count); | 5774 | pSMB->ByteCount = cpu_to_le16(byte_count); |
| 5759 | memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); | 5775 | memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); |
| 5760 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 5776 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 5777 | cifs_small_buf_release(pSMB); | ||
| 5761 | if (rc) | 5778 | if (rc) |
| 5762 | cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", | 5779 | cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", |
| 5763 | rc); | 5780 | rc); |
| @@ -5818,6 +5835,7 @@ CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 5818 | pSMB->ByteCount = cpu_to_le16(byte_count); | 5835 | pSMB->ByteCount = cpu_to_le16(byte_count); |
| 5819 | *data_offset = delete_file ? 1 : 0; | 5836 | *data_offset = delete_file ? 1 : 0; |
| 5820 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 5837 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 5838 | cifs_small_buf_release(pSMB); | ||
| 5821 | if (rc) | 5839 | if (rc) |
| 5822 | cifs_dbg(FYI, "Send error in SetFileDisposition = %d\n", rc); | 5840 | cifs_dbg(FYI, "Send error in SetFileDisposition = %d\n", rc); |
| 5823 | 5841 | ||
| @@ -6057,6 +6075,7 @@ CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 6057 | cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args); | 6075 | cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args); |
| 6058 | 6076 | ||
| 6059 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); | 6077 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0); |
| 6078 | cifs_small_buf_release(pSMB); | ||
| 6060 | if (rc) | 6079 | if (rc) |
| 6061 | cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", | 6080 | cifs_dbg(FYI, "Send error in Set Time (SetFileInfo) = %d\n", |
| 6062 | rc); | 6081 | rc); |
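Note (not part of the diff): the change repeated throughout the cifssmb.c hunks above is mechanical but worth stating once. Each async request used to be described by a single kvec covering "RFC1001 length + SMB"; it is now split so the 4-byte length field and the SMB body travel in separate vectors, which lets later patches sign or transform the body without the framing bytes. The pattern in isolation, exactly as used in CIFSSMBEcho and cifs_async_readv above:

    /* before: one vector covering framing + PDU
     *   iov.iov_base = smb;
     *   iov.iov_len  = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
     * after: framing and PDU split across two vectors */
    iov[0].iov_base = smb;
    iov[0].iov_len  = 4;                        /* RFC1001 length field */
    iov[1].iov_base = (char *)smb + 4;
    iov[1].iov_len  = get_rfc1002_length(smb);  /* the SMB itself */

    rqst.rq_iov  = iov;
    rqst.rq_nvec = 2;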
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 35ae49ed1f76..777ad9f4fc3c 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -787,6 +787,15 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
| 787 | 787 | ||
| 788 | dump_smb(buf, server->total_read); | 788 | dump_smb(buf, server->total_read); |
| 789 | 789 | ||
| 790 | return cifs_handle_standard(server, mid); | ||
| 791 | } | ||
| 792 | |||
| 793 | int | ||
| 794 | cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
| 795 | { | ||
| 796 | char *buf = server->large_buf ? server->bigbuf : server->smallbuf; | ||
| 797 | int length; | ||
| 798 | |||
| 790 | /* | 799 | /* |
| 791 | * We know that we received enough to get to the MID as we | 800 | * We know that we received enough to get to the MID as we |
| 792 | * checked the pdu_length earlier. Now check to see | 801 | * checked the pdu_length earlier. Now check to see |
| @@ -872,12 +881,19 @@ cifs_demultiplex_thread(void *p) | |||
| 872 | continue; | 881 | continue; |
| 873 | server->total_read += length; | 882 | server->total_read += length; |
| 874 | 883 | ||
| 875 | mid_entry = server->ops->find_mid(server, buf); | 884 | if (server->ops->is_transform_hdr && |
| 885 | server->ops->receive_transform && | ||
| 886 | server->ops->is_transform_hdr(buf)) { | ||
| 887 | length = server->ops->receive_transform(server, | ||
| 888 | &mid_entry); | ||
| 889 | } else { | ||
| 890 | mid_entry = server->ops->find_mid(server, buf); | ||
| 876 | 891 | ||
| 877 | if (!mid_entry || !mid_entry->receive) | 892 | if (!mid_entry || !mid_entry->receive) |
| 878 | length = standard_receive3(server, mid_entry); | 893 | length = standard_receive3(server, mid_entry); |
| 879 | else | 894 | else |
| 880 | length = mid_entry->receive(server, mid_entry); | 895 | length = mid_entry->receive(server, mid_entry); |
| 896 | } | ||
| 881 | 897 | ||
| 882 | if (length < 0) | 898 | if (length < 0) |
| 883 | continue; | 899 | continue; |
| @@ -2154,7 +2170,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) | |||
| 2154 | server->tcpStatus = CifsExiting; | 2170 | server->tcpStatus = CifsExiting; |
| 2155 | spin_unlock(&GlobalMid_Lock); | 2171 | spin_unlock(&GlobalMid_Lock); |
| 2156 | 2172 | ||
| 2157 | cifs_crypto_shash_release(server); | 2173 | cifs_crypto_secmech_release(server); |
| 2158 | cifs_fscache_release_client_cookie(server); | 2174 | cifs_fscache_release_client_cookie(server); |
| 2159 | 2175 | ||
| 2160 | kfree(server->session_key.response); | 2176 | kfree(server->session_key.response); |
| @@ -2273,7 +2289,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
| 2273 | return tcp_ses; | 2289 | return tcp_ses; |
| 2274 | 2290 | ||
| 2275 | out_err_crypto_release: | 2291 | out_err_crypto_release: |
| 2276 | cifs_crypto_shash_release(tcp_ses); | 2292 | cifs_crypto_secmech_release(tcp_ses); |
| 2277 | 2293 | ||
| 2278 | put_net(cifs_net_ns(tcp_ses)); | 2294 | put_net(cifs_net_ns(tcp_ses)); |
| 2279 | 2295 | ||
| @@ -2614,12 +2630,18 @@ get_ses_fail: | |||
| 2614 | return ERR_PTR(rc); | 2630 | return ERR_PTR(rc); |
| 2615 | } | 2631 | } |
| 2616 | 2632 | ||
| 2617 | static int match_tcon(struct cifs_tcon *tcon, const char *unc) | 2633 | static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info) |
| 2618 | { | 2634 | { |
| 2619 | if (tcon->tidStatus == CifsExiting) | 2635 | if (tcon->tidStatus == CifsExiting) |
| 2620 | return 0; | 2636 | return 0; |
| 2621 | if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) | 2637 | if (strncmp(tcon->treeName, volume_info->UNC, MAX_TREE_SIZE)) |
| 2622 | return 0; | 2638 | return 0; |
| 2639 | if (tcon->seal != volume_info->seal) | ||
| 2640 | return 0; | ||
| 2641 | #ifdef CONFIG_CIFS_SMB2 | ||
| 2642 | if (tcon->snapshot_time != volume_info->snapshot_time) | ||
| 2643 | return 0; | ||
| 2644 | #endif /* CONFIG_CIFS_SMB2 */ | ||
| 2623 | return 1; | 2645 | return 1; |
| 2624 | } | 2646 | } |
| 2625 | 2647 | ||
| @@ -2632,14 +2654,8 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
| 2632 | spin_lock(&cifs_tcp_ses_lock); | 2654 | spin_lock(&cifs_tcp_ses_lock); |
| 2633 | list_for_each(tmp, &ses->tcon_list) { | 2655 | list_for_each(tmp, &ses->tcon_list) { |
| 2634 | tcon = list_entry(tmp, struct cifs_tcon, tcon_list); | 2656 | tcon = list_entry(tmp, struct cifs_tcon, tcon_list); |
| 2635 | if (!match_tcon(tcon, volume_info->UNC)) | 2657 | if (!match_tcon(tcon, volume_info)) |
| 2636 | continue; | ||
| 2637 | |||
| 2638 | #ifdef CONFIG_CIFS_SMB2 | ||
| 2639 | if (tcon->snapshot_time != volume_info->snapshot_time) | ||
| 2640 | continue; | 2658 | continue; |
| 2641 | #endif /* CONFIG_CIFS_SMB2 */ | ||
| 2642 | |||
| 2643 | ++tcon->tc_count; | 2659 | ++tcon->tc_count; |
| 2644 | spin_unlock(&cifs_tcp_ses_lock); | 2660 | spin_unlock(&cifs_tcp_ses_lock); |
| 2645 | return tcon; | 2661 | return tcon; |
| @@ -2685,8 +2701,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
| 2685 | cifs_dbg(FYI, "Found match on UNC path\n"); | 2701 | cifs_dbg(FYI, "Found match on UNC path\n"); |
| 2686 | /* existing tcon already has a reference */ | 2702 | /* existing tcon already has a reference */ |
| 2687 | cifs_put_smb_ses(ses); | 2703 | cifs_put_smb_ses(ses); |
| 2688 | if (tcon->seal != volume_info->seal) | ||
| 2689 | cifs_dbg(VFS, "transport encryption setting conflicts with existing tid\n"); | ||
| 2690 | return tcon; | 2704 | return tcon; |
| 2691 | } | 2705 | } |
| 2692 | 2706 | ||
| @@ -2742,7 +2756,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
| 2742 | tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; | 2756 | tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; |
| 2743 | cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags); | 2757 | cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags); |
| 2744 | } | 2758 | } |
| 2745 | tcon->seal = volume_info->seal; | ||
| 2746 | tcon->use_persistent = false; | 2759 | tcon->use_persistent = false; |
| 2747 | /* check if SMB2 or later, CIFS does not support persistent handles */ | 2760 | /* check if SMB2 or later, CIFS does not support persistent handles */ |
| 2748 | if (volume_info->persistent) { | 2761 | if (volume_info->persistent) { |
| @@ -2779,6 +2792,24 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
| 2779 | tcon->use_resilient = true; | 2792 | tcon->use_resilient = true; |
| 2780 | } | 2793 | } |
| 2781 | 2794 | ||
| 2795 | if (volume_info->seal) { | ||
| 2796 | if (ses->server->vals->protocol_id == 0) { | ||
| 2797 | cifs_dbg(VFS, | ||
| 2798 | "SMB3 or later required for encryption\n"); | ||
| 2799 | rc = -EOPNOTSUPP; | ||
| 2800 | goto out_fail; | ||
| 2801 | #ifdef CONFIG_CIFS_SMB2 | ||
| 2802 | } else if (tcon->ses->server->capabilities & | ||
| 2803 | SMB2_GLOBAL_CAP_ENCRYPTION) | ||
| 2804 | tcon->seal = true; | ||
| 2805 | else { | ||
| 2806 | cifs_dbg(VFS, "Encryption is not supported on share\n"); | ||
| 2807 | rc = -EOPNOTSUPP; | ||
| 2808 | goto out_fail; | ||
| 2809 | #endif /* CONFIG_CIFS_SMB2 */ | ||
| 2810 | } | ||
| 2811 | } | ||
| 2812 | |||
| 2782 | /* | 2813 | /* |
| 2783 | * We can have only one retry value for a connection to a share so for | 2814 | * We can have only one retry value for a connection to a share so for |
| 2784 | * resources mounted more than once to the same server share the last | 2815 | * resources mounted more than once to the same server share the last |
| @@ -2910,7 +2941,7 @@ cifs_match_super(struct super_block *sb, void *data) | |||
| 2910 | 2941 | ||
| 2911 | if (!match_server(tcp_srv, volume_info) || | 2942 | if (!match_server(tcp_srv, volume_info) || |
| 2912 | !match_session(ses, volume_info) || | 2943 | !match_session(ses, volume_info) || |
| 2913 | !match_tcon(tcon, volume_info->UNC) || | 2944 | !match_tcon(tcon, volume_info) || |
| 2914 | !match_prepath(sb, mnt_data)) { | 2945 | !match_prepath(sb, mnt_data)) { |
| 2915 | rc = 0; | 2946 | rc = 0; |
| 2916 | goto out; | 2947 | goto out; |
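Note (not part of the diff): two policy changes in connect.c are easy to miss among the mechanical ones. match_tcon() now refuses to share a tree connection unless the UNC path, the seal setting and (under CONFIG_CIFS_SMB2) the snapshot time all agree, and asking for "seal" on anything older than SMB3, or against a server that does not advertise SMB2_GLOBAL_CAP_ENCRYPTION, now fails the mount with -EOPNOTSUPP instead of merely logging a warning. The new mount-time rule restated as a stand-alone helper purely for illustration; no such function exists in the patch:

    static int can_seal_tcon(struct cifs_ses *ses)
    {
            if (ses->server->vals->protocol_id == 0)
                    return -EOPNOTSUPP;  /* original CIFS dialect: no encryption */
            if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
                    return -EOPNOTSUPP;  /* server does not advertise encryption */
            return 0;                    /* safe to set tcon->seal = true */
    }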
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 18a1e1d6671f..98dc842e7245 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -2884,7 +2884,15 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter) | |||
| 2884 | for (i = 0; i < rdata->nr_pages; i++) { | 2884 | for (i = 0; i < rdata->nr_pages; i++) { |
| 2885 | struct page *page = rdata->pages[i]; | 2885 | struct page *page = rdata->pages[i]; |
| 2886 | size_t copy = min_t(size_t, remaining, PAGE_SIZE); | 2886 | size_t copy = min_t(size_t, remaining, PAGE_SIZE); |
| 2887 | size_t written = copy_page_to_iter(page, 0, copy, iter); | 2887 | size_t written; |
| 2888 | |||
| 2889 | if (unlikely(iter->type & ITER_PIPE)) { | ||
| 2890 | void *addr = kmap_atomic(page); | ||
| 2891 | |||
| 2892 | written = copy_to_iter(addr, copy, iter); | ||
| 2893 | kunmap_atomic(addr); | ||
| 2894 | } else | ||
| 2895 | written = copy_page_to_iter(page, 0, copy, iter); | ||
| 2888 | remaining -= written; | 2896 | remaining -= written; |
| 2889 | if (written < copy && iov_iter_count(iter) > 0) | 2897 | if (written < copy && iov_iter_count(iter) > 0) |
| 2890 | break; | 2898 | break; |
| @@ -2903,8 +2911,9 @@ cifs_uncached_readv_complete(struct work_struct *work) | |||
| 2903 | } | 2911 | } |
| 2904 | 2912 | ||
| 2905 | static int | 2913 | static int |
| 2906 | cifs_uncached_read_into_pages(struct TCP_Server_Info *server, | 2914 | uncached_fill_pages(struct TCP_Server_Info *server, |
| 2907 | struct cifs_readdata *rdata, unsigned int len) | 2915 | struct cifs_readdata *rdata, struct iov_iter *iter, |
| 2916 | unsigned int len) | ||
| 2908 | { | 2917 | { |
| 2909 | int result = 0; | 2918 | int result = 0; |
| 2910 | unsigned int i; | 2919 | unsigned int i; |
| @@ -2933,7 +2942,10 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server, | |||
| 2933 | rdata->tailsz = len; | 2942 | rdata->tailsz = len; |
| 2934 | len = 0; | 2943 | len = 0; |
| 2935 | } | 2944 | } |
| 2936 | result = cifs_read_page_from_socket(server, page, n); | 2945 | if (iter) |
| 2946 | result = copy_page_from_iter(page, 0, n, iter); | ||
| 2947 | else | ||
| 2948 | result = cifs_read_page_from_socket(server, page, n); | ||
| 2937 | if (result < 0) | 2949 | if (result < 0) |
| 2938 | break; | 2950 | break; |
| 2939 | 2951 | ||
| @@ -2945,6 +2957,21 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server, | |||
| 2945 | } | 2957 | } |
| 2946 | 2958 | ||
| 2947 | static int | 2959 | static int |
| 2960 | cifs_uncached_read_into_pages(struct TCP_Server_Info *server, | ||
| 2961 | struct cifs_readdata *rdata, unsigned int len) | ||
| 2962 | { | ||
| 2963 | return uncached_fill_pages(server, rdata, NULL, len); | ||
| 2964 | } | ||
| 2965 | |||
| 2966 | static int | ||
| 2967 | cifs_uncached_copy_into_pages(struct TCP_Server_Info *server, | ||
| 2968 | struct cifs_readdata *rdata, | ||
| 2969 | struct iov_iter *iter) | ||
| 2970 | { | ||
| 2971 | return uncached_fill_pages(server, rdata, iter, iter->count); | ||
| 2972 | } | ||
| 2973 | |||
| 2974 | static int | ||
| 2948 | cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, | 2975 | cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, |
| 2949 | struct cifs_sb_info *cifs_sb, struct list_head *rdata_list) | 2976 | struct cifs_sb_info *cifs_sb, struct list_head *rdata_list) |
| 2950 | { | 2977 | { |
| @@ -2991,6 +3018,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, | |||
| 2991 | rdata->pid = pid; | 3018 | rdata->pid = pid; |
| 2992 | rdata->pagesz = PAGE_SIZE; | 3019 | rdata->pagesz = PAGE_SIZE; |
| 2993 | rdata->read_into_pages = cifs_uncached_read_into_pages; | 3020 | rdata->read_into_pages = cifs_uncached_read_into_pages; |
| 3021 | rdata->copy_into_pages = cifs_uncached_copy_into_pages; | ||
| 2994 | rdata->credits = credits; | 3022 | rdata->credits = credits; |
| 2995 | 3023 | ||
| 2996 | if (!rdata->cfile->invalidHandle || | 3024 | if (!rdata->cfile->invalidHandle || |
| @@ -3341,8 +3369,9 @@ cifs_readv_complete(struct work_struct *work) | |||
| 3341 | } | 3369 | } |
| 3342 | 3370 | ||
| 3343 | static int | 3371 | static int |
| 3344 | cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | 3372 | readpages_fill_pages(struct TCP_Server_Info *server, |
| 3345 | struct cifs_readdata *rdata, unsigned int len) | 3373 | struct cifs_readdata *rdata, struct iov_iter *iter, |
| 3374 | unsigned int len) | ||
| 3346 | { | 3375 | { |
| 3347 | int result = 0; | 3376 | int result = 0; |
| 3348 | unsigned int i; | 3377 | unsigned int i; |
| @@ -3396,7 +3425,10 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | |||
| 3396 | continue; | 3425 | continue; |
| 3397 | } | 3426 | } |
| 3398 | 3427 | ||
| 3399 | result = cifs_read_page_from_socket(server, page, n); | 3428 | if (iter) |
| 3429 | result = copy_page_from_iter(page, 0, n, iter); | ||
| 3430 | else | ||
| 3431 | result = cifs_read_page_from_socket(server, page, n); | ||
| 3400 | if (result < 0) | 3432 | if (result < 0) |
| 3401 | break; | 3433 | break; |
| 3402 | 3434 | ||
| @@ -3408,6 +3440,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | |||
| 3408 | } | 3440 | } |
| 3409 | 3441 | ||
| 3410 | static int | 3442 | static int |
| 3443 | cifs_readpages_read_into_pages(struct TCP_Server_Info *server, | ||
| 3444 | struct cifs_readdata *rdata, unsigned int len) | ||
| 3445 | { | ||
| 3446 | return readpages_fill_pages(server, rdata, NULL, len); | ||
| 3447 | } | ||
| 3448 | |||
| 3449 | static int | ||
| 3450 | cifs_readpages_copy_into_pages(struct TCP_Server_Info *server, | ||
| 3451 | struct cifs_readdata *rdata, | ||
| 3452 | struct iov_iter *iter) | ||
| 3453 | { | ||
| 3454 | return readpages_fill_pages(server, rdata, iter, iter->count); | ||
| 3455 | } | ||
| 3456 | |||
| 3457 | static int | ||
| 3411 | readpages_get_pages(struct address_space *mapping, struct list_head *page_list, | 3458 | readpages_get_pages(struct address_space *mapping, struct list_head *page_list, |
| 3412 | unsigned int rsize, struct list_head *tmplist, | 3459 | unsigned int rsize, struct list_head *tmplist, |
| 3413 | unsigned int *nr_pages, loff_t *offset, unsigned int *bytes) | 3460 | unsigned int *nr_pages, loff_t *offset, unsigned int *bytes) |
| @@ -3561,6 +3608,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
| 3561 | rdata->pid = pid; | 3608 | rdata->pid = pid; |
| 3562 | rdata->pagesz = PAGE_SIZE; | 3609 | rdata->pagesz = PAGE_SIZE; |
| 3563 | rdata->read_into_pages = cifs_readpages_read_into_pages; | 3610 | rdata->read_into_pages = cifs_readpages_read_into_pages; |
| 3611 | rdata->copy_into_pages = cifs_readpages_copy_into_pages; | ||
| 3564 | rdata->credits = credits; | 3612 | rdata->credits = credits; |
| 3565 | 3613 | ||
| 3566 | list_for_each_entry_safe(page, tpage, &tmplist, lru) { | 3614 | list_for_each_entry_safe(page, tpage, &tmplist, lru) { |
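Note (not part of the diff): the file.c refactor folds the two socket-reading callbacks into generic *_fill_pages() helpers that take an optional iov_iter. With iter == NULL they read from the socket as before; with a non-NULL iter they copy out of an already-decrypted in-memory buffer, which is what the new copy_into_pages callback exists for. A hedged sketch of how a decrypt path is expected to drive the pair; the helper below is illustrative only, the real caller lives in the SMB2 code elsewhere in this series:

    static int fill_pages_from(struct TCP_Server_Info *server,
                               struct cifs_readdata *rdata,
                               struct iov_iter *decrypted, unsigned int len)
    {
            if (decrypted)
                    /* payload already decrypted into memory: plain copy */
                    return rdata->copy_into_pages(server, rdata, decrypted);
            /* normal case: pull the payload straight off the socket */
            return rdata->read_into_pages(server, rdata, len);
    }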
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 8f6a2a5863b9..a27fc8791551 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
| @@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file) | |||
| 285 | rc = -ENOMEM; | 285 | rc = -ENOMEM; |
| 286 | goto error_exit; | 286 | goto error_exit; |
| 287 | } | 287 | } |
| 288 | spin_lock_init(&cifsFile->file_info_lock); | ||
| 288 | file->private_data = cifsFile; | 289 | file->private_data = cifsFile; |
| 289 | cifsFile->tlink = cifs_get_tlink(tlink); | 290 | cifsFile->tlink = cifs_get_tlink(tlink); |
| 290 | tcon = tlink_tcon(tlink); | 291 | tcon = tlink_tcon(tlink); |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 538d9b55699a..dcbcc927399a 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
| @@ -344,13 +344,12 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, | |||
| 344 | /* BB is NTLMV2 session security format easier to use here? */ | 344 | /* BB is NTLMV2 session security format easier to use here? */ |
| 345 | flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | | 345 | flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | |
| 346 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | | 346 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | |
| 347 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; | 347 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | |
| 348 | if (ses->server->sign) { | 348 | NTLMSSP_NEGOTIATE_SEAL; |
| 349 | if (ses->server->sign) | ||
| 349 | flags |= NTLMSSP_NEGOTIATE_SIGN; | 350 | flags |= NTLMSSP_NEGOTIATE_SIGN; |
| 350 | if (!ses->server->session_estab || | 351 | if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) |
| 351 | ses->ntlmssp->sesskey_per_smbsess) | 352 | flags |= NTLMSSP_NEGOTIATE_KEY_XCH; |
| 352 | flags |= NTLMSSP_NEGOTIATE_KEY_XCH; | ||
| 353 | } | ||
| 354 | 353 | ||
| 355 | sec_blob->NegotiateFlags = cpu_to_le32(flags); | 354 | sec_blob->NegotiateFlags = cpu_to_le32(flags); |
| 356 | 355 | ||
| @@ -407,13 +406,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, | |||
| 407 | flags = NTLMSSP_NEGOTIATE_56 | | 406 | flags = NTLMSSP_NEGOTIATE_56 | |
| 408 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | | 407 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | |
| 409 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | | 408 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | |
| 410 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; | 409 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | |
| 411 | if (ses->server->sign) { | 410 | NTLMSSP_NEGOTIATE_SEAL; |
| 411 | if (ses->server->sign) | ||
| 412 | flags |= NTLMSSP_NEGOTIATE_SIGN; | 412 | flags |= NTLMSSP_NEGOTIATE_SIGN; |
| 413 | if (!ses->server->session_estab || | 413 | if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) |
| 414 | ses->ntlmssp->sesskey_per_smbsess) | 414 | flags |= NTLMSSP_NEGOTIATE_KEY_XCH; |
| 415 | flags |= NTLMSSP_NEGOTIATE_KEY_XCH; | ||
| 416 | } | ||
| 417 | 415 | ||
| 418 | tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); | 416 | tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); |
| 419 | sec_blob->NegotiateFlags = cpu_to_le32(flags); | 417 | sec_blob->NegotiateFlags = cpu_to_le32(flags); |
| @@ -652,6 +650,7 @@ sess_sendreceive(struct sess_data *sess_data) | |||
| 652 | int rc; | 650 | int rc; |
| 653 | struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base; | 651 | struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base; |
| 654 | __u16 count; | 652 | __u16 count; |
| 653 | struct kvec rsp_iov = { NULL, 0 }; | ||
| 655 | 654 | ||
| 656 | count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; | 655 | count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; |
| 657 | smb_buf->smb_buf_length = | 656 | smb_buf->smb_buf_length = |
| @@ -661,7 +660,9 @@ sess_sendreceive(struct sess_data *sess_data) | |||
| 661 | rc = SendReceive2(sess_data->xid, sess_data->ses, | 660 | rc = SendReceive2(sess_data->xid, sess_data->ses, |
| 662 | sess_data->iov, 3 /* num_iovecs */, | 661 | sess_data->iov, 3 /* num_iovecs */, |
| 663 | &sess_data->buf0_type, | 662 | &sess_data->buf0_type, |
| 664 | CIFS_LOG_ERROR); | 663 | CIFS_LOG_ERROR, &rsp_iov); |
| 664 | cifs_small_buf_release(sess_data->iov[0].iov_base); | ||
| 665 | memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); | ||
| 665 | 666 | ||
| 666 | return rc; | 667 | return rc; |
| 667 | } | 668 | } |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index fc537c29044e..67a987e4d026 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
| @@ -36,11 +36,11 @@ | |||
| 36 | * SMB_COM_NT_CANCEL request and then sends it. | 36 | * SMB_COM_NT_CANCEL request and then sends it. |
| 37 | */ | 37 | */ |
| 38 | static int | 38 | static int |
| 39 | send_nt_cancel(struct TCP_Server_Info *server, void *buf, | 39 | send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst, |
| 40 | struct mid_q_entry *mid) | 40 | struct mid_q_entry *mid) |
| 41 | { | 41 | { |
| 42 | int rc = 0; | 42 | int rc = 0; |
| 43 | struct smb_hdr *in_buf = (struct smb_hdr *)buf; | 43 | struct smb_hdr *in_buf = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
| 44 | 44 | ||
| 45 | /* -4 for RFC1001 length and +2 for BCC field */ | 45 | /* -4 for RFC1001 length and +2 for BCC field */ |
| 46 | in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2); | 46 | in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2); |
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index 0ffa18094335..401a5d856636 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h | |||
| @@ -61,4 +61,9 @@ | |||
| 61 | /* Maximum buffer size value we can send with 1 credit */ | 61 | /* Maximum buffer size value we can send with 1 credit */ |
| 62 | #define SMB2_MAX_BUFFER_SIZE 65536 | 62 | #define SMB2_MAX_BUFFER_SIZE 65536 |
| 63 | 63 | ||
| 64 | static inline struct smb2_sync_hdr *get_sync_hdr(void *buf) | ||
| 65 | { | ||
| 66 | return &(((struct smb2_hdr *)buf)->sync_hdr); | ||
| 67 | } | ||
| 68 | |||
| 64 | #endif /* _SMB2_GLOB_H */ | 69 | #endif /* _SMB2_GLOB_H */ |
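Note (not part of the diff): get_sync_hdr() is the small accessor the rest of the series leans on. With smb2_hdr now split into the on-the-wire length prefix plus an embedded smb2_sync_hdr, callers stop casting buf to struct smb2_hdr and reach the protocol fields through the sync header instead, which is groundwork for handling transformed (encrypted) PDUs. Typical use, mirroring the smb2maperror.c and smb2misc.c hunks below:

    struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
    __u64 mid = le64_to_cpu(shdr->MessageId);

    if (shdr->Status != 0)
            cifs_dbg(FYI, "mid %llu completed with status 0x%x\n",
                     mid, le32_to_cpu(shdr->Status));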
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index 8257a5a97cc0..3030a9dfb0dd 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include "smb2pdu.h" | 26 | #include "smb2pdu.h" |
| 27 | #include "smb2proto.h" | 27 | #include "smb2proto.h" |
| 28 | #include "smb2status.h" | 28 | #include "smb2status.h" |
| 29 | #include "smb2glob.h" | ||
| 29 | 30 | ||
| 30 | struct status_to_posix_error { | 31 | struct status_to_posix_error { |
| 31 | __le32 smb2_status; | 32 | __le32 smb2_status; |
| @@ -2449,10 +2450,10 @@ smb2_print_status(__le32 status) | |||
| 2449 | int | 2450 | int |
| 2450 | map_smb2_to_linux_error(char *buf, bool log_err) | 2451 | map_smb2_to_linux_error(char *buf, bool log_err) |
| 2451 | { | 2452 | { |
| 2452 | struct smb2_hdr *hdr = (struct smb2_hdr *)buf; | 2453 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); |
| 2453 | unsigned int i; | 2454 | unsigned int i; |
| 2454 | int rc = -EIO; | 2455 | int rc = -EIO; |
| 2455 | __le32 smb2err = hdr->Status; | 2456 | __le32 smb2err = shdr->Status; |
| 2456 | 2457 | ||
| 2457 | if (smb2err == 0) | 2458 | if (smb2err == 0) |
| 2458 | return 0; | 2459 | return 0; |
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 3d383489b9cf..fd516ea8b8f8 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
| @@ -28,31 +28,32 @@ | |||
| 28 | #include "cifs_debug.h" | 28 | #include "cifs_debug.h" |
| 29 | #include "cifs_unicode.h" | 29 | #include "cifs_unicode.h" |
| 30 | #include "smb2status.h" | 30 | #include "smb2status.h" |
| 31 | #include "smb2glob.h" | ||
| 31 | 32 | ||
| 32 | static int | 33 | static int |
| 33 | check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) | 34 | check_smb2_hdr(struct smb2_sync_hdr *shdr, __u64 mid) |
| 34 | { | 35 | { |
| 35 | __u64 wire_mid = le64_to_cpu(hdr->MessageId); | 36 | __u64 wire_mid = le64_to_cpu(shdr->MessageId); |
| 36 | 37 | ||
| 37 | /* | 38 | /* |
| 38 | * Make sure that this really is an SMB, that it is a response, | 39 | * Make sure that this really is an SMB, that it is a response, |
| 39 | * and that the message ids match. | 40 | * and that the message ids match. |
| 40 | */ | 41 | */ |
| 41 | if ((hdr->ProtocolId == SMB2_PROTO_NUMBER) && | 42 | if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) && |
| 42 | (mid == wire_mid)) { | 43 | (mid == wire_mid)) { |
| 43 | if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) | 44 | if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) |
| 44 | return 0; | 45 | return 0; |
| 45 | else { | 46 | else { |
| 46 | /* only one valid case where server sends us request */ | 47 | /* only one valid case where server sends us request */ |
| 47 | if (hdr->Command == SMB2_OPLOCK_BREAK) | 48 | if (shdr->Command == SMB2_OPLOCK_BREAK) |
| 48 | return 0; | 49 | return 0; |
| 49 | else | 50 | else |
| 50 | cifs_dbg(VFS, "Received Request not response\n"); | 51 | cifs_dbg(VFS, "Received Request not response\n"); |
| 51 | } | 52 | } |
| 52 | } else { /* bad signature or mid */ | 53 | } else { /* bad signature or mid */ |
| 53 | if (hdr->ProtocolId != SMB2_PROTO_NUMBER) | 54 | if (shdr->ProtocolId != SMB2_PROTO_NUMBER) |
| 54 | cifs_dbg(VFS, "Bad protocol string signature header %x\n", | 55 | cifs_dbg(VFS, "Bad protocol string signature header %x\n", |
| 55 | le32_to_cpu(hdr->ProtocolId)); | 56 | le32_to_cpu(shdr->ProtocolId)); |
| 56 | if (mid != wire_mid) | 57 | if (mid != wire_mid) |
| 57 | cifs_dbg(VFS, "Mids do not match: %llu and %llu\n", | 58 | cifs_dbg(VFS, "Mids do not match: %llu and %llu\n", |
| 58 | mid, wire_mid); | 59 | mid, wire_mid); |
| @@ -95,8 +96,9 @@ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { | |||
| 95 | int | 96 | int |
| 96 | smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) | 97 | smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) |
| 97 | { | 98 | { |
| 98 | struct smb2_hdr *hdr = (struct smb2_hdr *)buf; | 99 | struct smb2_pdu *pdu = (struct smb2_pdu *)buf; |
| 99 | struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; | 100 | struct smb2_hdr *hdr = &pdu->hdr; |
| 101 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); | ||
| 100 | __u64 mid; | 102 | __u64 mid; |
| 101 | __u32 len = get_rfc1002_length(buf); | 103 | __u32 len = get_rfc1002_length(buf); |
| 102 | __u32 clc_len; /* calculated length */ | 104 | __u32 clc_len; /* calculated length */ |
| @@ -111,7 +113,7 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) | |||
| 111 | * ie Validate the wct via smb2_struct_sizes table above | 113 | * ie Validate the wct via smb2_struct_sizes table above |
| 112 | */ | 114 | */ |
| 113 | 115 | ||
| 114 | if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { | 116 | if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { |
| 115 | struct smb2_transform_hdr *thdr = | 117 | struct smb2_transform_hdr *thdr = |
| 116 | (struct smb2_transform_hdr *)buf; | 118 | (struct smb2_transform_hdr *)buf; |
| 117 | struct cifs_ses *ses = NULL; | 119 | struct cifs_ses *ses = NULL; |
| @@ -133,10 +135,10 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) | |||
| 133 | } | 135 | } |
| 134 | } | 136 | } |
| 135 | 137 | ||
| 136 | 138 | mid = le64_to_cpu(shdr->MessageId); | |
| 137 | mid = le64_to_cpu(hdr->MessageId); | ||
| 138 | if (length < sizeof(struct smb2_pdu)) { | 139 | if (length < sizeof(struct smb2_pdu)) { |
| 139 | if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) { | 140 | if ((length >= sizeof(struct smb2_hdr)) |
| 141 | && (shdr->Status != 0)) { | ||
| 140 | pdu->StructureSize2 = 0; | 142 | pdu->StructureSize2 = 0; |
| 141 | /* | 143 | /* |
| 142 | * As with SMB/CIFS, on some error cases servers may | 144 | * As with SMB/CIFS, on some error cases servers may |
| @@ -154,29 +156,30 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) | |||
| 154 | return 1; | 156 | return 1; |
| 155 | } | 157 | } |
| 156 | 158 | ||
| 157 | if (check_smb2_hdr(hdr, mid)) | 159 | if (check_smb2_hdr(shdr, mid)) |
| 158 | return 1; | 160 | return 1; |
| 159 | 161 | ||
| 160 | if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { | 162 | if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { |
| 161 | cifs_dbg(VFS, "Illegal structure size %u\n", | 163 | cifs_dbg(VFS, "Illegal structure size %u\n", |
| 162 | le16_to_cpu(hdr->StructureSize)); | 164 | le16_to_cpu(shdr->StructureSize)); |
| 163 | return 1; | 165 | return 1; |
| 164 | } | 166 | } |
| 165 | 167 | ||
| 166 | command = le16_to_cpu(hdr->Command); | 168 | command = le16_to_cpu(shdr->Command); |
| 167 | if (command >= NUMBER_OF_SMB2_COMMANDS) { | 169 | if (command >= NUMBER_OF_SMB2_COMMANDS) { |
| 168 | cifs_dbg(VFS, "Illegal SMB2 command %d\n", command); | 170 | cifs_dbg(VFS, "Illegal SMB2 command %d\n", command); |
| 169 | return 1; | 171 | return 1; |
| 170 | } | 172 | } |
| 171 | 173 | ||
| 172 | if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) { | 174 | if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) { |
| 173 | if (command != SMB2_OPLOCK_BREAK_HE && (hdr->Status == 0 || | 175 | if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 || |
| 174 | pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) { | 176 | pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) { |
| 175 | /* error packets have 9 byte structure size */ | 177 | /* error packets have 9 byte structure size */ |
| 176 | cifs_dbg(VFS, "Illegal response size %u for command %d\n", | 178 | cifs_dbg(VFS, "Illegal response size %u for command %d\n", |
| 177 | le16_to_cpu(pdu->StructureSize2), command); | 179 | le16_to_cpu(pdu->StructureSize2), command); |
| 178 | return 1; | 180 | return 1; |
| 179 | } else if (command == SMB2_OPLOCK_BREAK_HE && (hdr->Status == 0) | 181 | } else if (command == SMB2_OPLOCK_BREAK_HE |
| 182 | && (shdr->Status == 0) | ||
| 180 | && (le16_to_cpu(pdu->StructureSize2) != 44) | 183 | && (le16_to_cpu(pdu->StructureSize2) != 44) |
| 181 | && (le16_to_cpu(pdu->StructureSize2) != 36)) { | 184 | && (le16_to_cpu(pdu->StructureSize2) != 36)) { |
| 182 | /* special case for SMB2.1 lease break message */ | 185 | /* special case for SMB2.1 lease break message */ |
| @@ -199,7 +202,7 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr) | |||
| 199 | clc_len, 4 + len, mid); | 202 | clc_len, 4 + len, mid); |
| 200 | /* create failed on symlink */ | 203 | /* create failed on symlink */ |
| 201 | if (command == SMB2_CREATE_HE && | 204 | if (command == SMB2_CREATE_HE && |
| 202 | hdr->Status == STATUS_STOPPED_ON_SYMLINK) | 205 | shdr->Status == STATUS_STOPPED_ON_SYMLINK) |
| 203 | return 0; | 206 | return 0; |
| 204 | /* Windows 7 server returns 24 bytes more */ | 207 | /* Windows 7 server returns 24 bytes more */ |
| 205 | if (clc_len + 20 == len && command == SMB2_OPLOCK_BREAK_HE) | 208 | if (clc_len + 20 == len && command == SMB2_OPLOCK_BREAK_HE) |
| @@ -261,11 +264,12 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = { | |||
| 261 | char * | 264 | char * |
| 262 | smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) | 265 | smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) |
| 263 | { | 266 | { |
| 267 | struct smb2_sync_hdr *shdr = get_sync_hdr(hdr); | ||
| 264 | *off = 0; | 268 | *off = 0; |
| 265 | *len = 0; | 269 | *len = 0; |
| 266 | 270 | ||
| 267 | /* error responses do not have data area */ | 271 | /* error responses do not have data area */ |
| 268 | if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED && | 272 | if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED && |
| 269 | (((struct smb2_err_rsp *)hdr)->StructureSize) == | 273 | (((struct smb2_err_rsp *)hdr)->StructureSize) == |
| 270 | SMB2_ERROR_STRUCTURE_SIZE2) | 274 | SMB2_ERROR_STRUCTURE_SIZE2) |
| 271 | return NULL; | 275 | return NULL; |
| @@ -275,7 +279,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) | |||
| 275 | * of the data buffer offset and data buffer length for the particular | 279 | * of the data buffer offset and data buffer length for the particular |
| 276 | * command. | 280 | * command. |
| 277 | */ | 281 | */ |
| 278 | switch (hdr->Command) { | 282 | switch (shdr->Command) { |
| 279 | case SMB2_NEGOTIATE: | 283 | case SMB2_NEGOTIATE: |
| 280 | *off = le16_to_cpu( | 284 | *off = le16_to_cpu( |
| 281 | ((struct smb2_negotiate_rsp *)hdr)->SecurityBufferOffset); | 285 | ((struct smb2_negotiate_rsp *)hdr)->SecurityBufferOffset); |
| @@ -346,7 +350,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) | |||
| 346 | 350 | ||
| 347 | /* return pointer to beginning of data area, ie offset from SMB start */ | 351 | /* return pointer to beginning of data area, ie offset from SMB start */ |
| 348 | if ((*off != 0) && (*len != 0)) | 352 | if ((*off != 0) && (*len != 0)) |
| 349 | return (char *)(&hdr->ProtocolId) + *off; | 353 | return (char *)shdr + *off; |
| 350 | else | 354 | else |
| 351 | return NULL; | 355 | return NULL; |
| 352 | } | 356 | } |
| @@ -358,12 +362,13 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) | |||
| 358 | unsigned int | 362 | unsigned int |
| 359 | smb2_calc_size(void *buf) | 363 | smb2_calc_size(void *buf) |
| 360 | { | 364 | { |
| 361 | struct smb2_hdr *hdr = (struct smb2_hdr *)buf; | 365 | struct smb2_pdu *pdu = (struct smb2_pdu *)buf; |
| 362 | struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; | 366 | struct smb2_hdr *hdr = &pdu->hdr; |
| 367 | struct smb2_sync_hdr *shdr = get_sync_hdr(hdr); | ||
| 363 | int offset; /* the offset from the beginning of SMB to data area */ | 368 | int offset; /* the offset from the beginning of SMB to data area */ |
| 364 | int data_length; /* the length of the variable length data area */ | 369 | int data_length; /* the length of the variable length data area */ |
| 365 | /* Structure Size has already been checked to make sure it is 64 */ | 370 | /* Structure Size has already been checked to make sure it is 64 */ |
| 366 | int len = 4 + le16_to_cpu(pdu->hdr.StructureSize); | 371 | int len = 4 + le16_to_cpu(shdr->StructureSize); |
| 367 | 372 | ||
| 368 | /* | 373 | /* |
| 369 | * StructureSize2, ie length of fixed parameter area has already | 374 | * StructureSize2, ie length of fixed parameter area has already |
| @@ -371,7 +376,7 @@ smb2_calc_size(void *buf) | |||
| 371 | */ | 376 | */ |
| 372 | len += le16_to_cpu(pdu->StructureSize2); | 377 | len += le16_to_cpu(pdu->StructureSize2); |
| 373 | 378 | ||
| 374 | if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false) | 379 | if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false) |
| 375 | goto calc_size_exit; | 380 | goto calc_size_exit; |
| 376 | 381 | ||
| 377 | smb2_get_data_area_len(&offset, &data_length, hdr); | 382 | smb2_get_data_area_len(&offset, &data_length, hdr); |
| @@ -582,7 +587,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) | |||
| 582 | 587 | ||
| 583 | cifs_dbg(FYI, "Checking for oplock break\n"); | 588 | cifs_dbg(FYI, "Checking for oplock break\n"); |
| 584 | 589 | ||
| 585 | if (rsp->hdr.Command != SMB2_OPLOCK_BREAK) | 590 | if (rsp->hdr.sync_hdr.Command != SMB2_OPLOCK_BREAK) |
| 586 | return false; | 591 | return false; |
| 587 | 592 | ||
| 588 | if (rsp->StructureSize != | 593 | if (rsp->StructureSize != |
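The smb2misc.c hunks above are purely mechanical: every direct access to struct smb2_hdr fields is rerouted through the new get_sync_hdr() accessor, so the same validation code can run on headers with or without the 4-byte RFC1001 length prefix. A minimal sketch of the layout this implies (field list abbreviated and types assumed; the real definitions live in fs/cifs/smb2pdu.h):

    /* Sketch only -- abbreviated, assuming the split implied by the diff. */
    struct smb2_sync_hdr {
            __le32 ProtocolId;      /* 0xFE 'S' 'M' 'B' */
            __le16 StructureSize;   /* always 64 */
            __le16 CreditCharge;
            __le32 Status;
            __le16 Command;
            __le16 CreditRequest;
            __le32 Flags;
            /* ... NextCommand, MessageId, TreeId, SessionId, Signature ... */
    } __packed;

    struct smb2_hdr {
            __be32 smb2_buf_length;         /* RFC1001 length, big endian */
            struct smb2_sync_hdr sync_hdr;  /* header as it appears on the wire */
    } __packed;

    static inline struct smb2_sync_hdr *get_sync_hdr(void *buf)
    {
            return &((struct smb2_hdr *)buf)->sync_hdr;
    }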
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 5d456ebb3813..a44b4dbe4aae 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/pagemap.h> | 20 | #include <linux/pagemap.h> |
| 21 | #include <linux/vfs.h> | 21 | #include <linux/vfs.h> |
| 22 | #include <linux/falloc.h> | 22 | #include <linux/falloc.h> |
| 23 | #include <linux/scatterlist.h> | ||
| 24 | #include <crypto/aead.h> | ||
| 23 | #include "cifsglob.h" | 25 | #include "cifsglob.h" |
| 24 | #include "smb2pdu.h" | 26 | #include "smb2pdu.h" |
| 25 | #include "smb2proto.h" | 27 | #include "smb2proto.h" |
| @@ -119,7 +121,9 @@ smb2_get_credits_field(struct TCP_Server_Info *server, const int optype) | |||
| 119 | static unsigned int | 121 | static unsigned int |
| 120 | smb2_get_credits(struct mid_q_entry *mid) | 122 | smb2_get_credits(struct mid_q_entry *mid) |
| 121 | { | 123 | { |
| 122 | return le16_to_cpu(((struct smb2_hdr *)mid->resp_buf)->CreditRequest); | 124 | struct smb2_sync_hdr *shdr = get_sync_hdr(mid->resp_buf); |
| 125 | |||
| 126 | return le16_to_cpu(shdr->CreditRequest); | ||
| 123 | } | 127 | } |
| 124 | 128 | ||
| 125 | static int | 129 | static int |
| @@ -184,10 +188,10 @@ static struct mid_q_entry * | |||
| 184 | smb2_find_mid(struct TCP_Server_Info *server, char *buf) | 188 | smb2_find_mid(struct TCP_Server_Info *server, char *buf) |
| 185 | { | 189 | { |
| 186 | struct mid_q_entry *mid; | 190 | struct mid_q_entry *mid; |
| 187 | struct smb2_hdr *hdr = (struct smb2_hdr *)buf; | 191 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); |
| 188 | __u64 wire_mid = le64_to_cpu(hdr->MessageId); | 192 | __u64 wire_mid = le64_to_cpu(shdr->MessageId); |
| 189 | 193 | ||
| 190 | if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { | 194 | if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) { |
| 191 | cifs_dbg(VFS, "encrypted frame parsing not supported yet"); | 195 | cifs_dbg(VFS, "encrypted frame parsing not supported yet"); |
| 192 | return NULL; | 196 | return NULL; |
| 193 | } | 197 | } |
| @@ -196,7 +200,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf) | |||
| 196 | list_for_each_entry(mid, &server->pending_mid_q, qhead) { | 200 | list_for_each_entry(mid, &server->pending_mid_q, qhead) { |
| 197 | if ((mid->mid == wire_mid) && | 201 | if ((mid->mid == wire_mid) && |
| 198 | (mid->mid_state == MID_REQUEST_SUBMITTED) && | 202 | (mid->mid_state == MID_REQUEST_SUBMITTED) && |
| 199 | (mid->command == hdr->Command)) { | 203 | (mid->command == shdr->Command)) { |
| 200 | spin_unlock(&GlobalMid_Lock); | 204 | spin_unlock(&GlobalMid_Lock); |
| 201 | return mid; | 205 | return mid; |
| 202 | } | 206 | } |
| @@ -209,12 +213,12 @@ static void | |||
| 209 | smb2_dump_detail(void *buf) | 213 | smb2_dump_detail(void *buf) |
| 210 | { | 214 | { |
| 211 | #ifdef CONFIG_CIFS_DEBUG2 | 215 | #ifdef CONFIG_CIFS_DEBUG2 |
| 212 | struct smb2_hdr *smb = (struct smb2_hdr *)buf; | 216 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); |
| 213 | 217 | ||
| 214 | cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n", | 218 | cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n", |
| 215 | smb->Command, smb->Status, smb->Flags, smb->MessageId, | 219 | shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId, |
| 216 | smb->ProcessId); | 220 | shdr->ProcessId); |
| 217 | cifs_dbg(VFS, "smb buf %p len %u\n", smb, smb2_calc_size(smb)); | 221 | cifs_dbg(VFS, "smb buf %p len %u\n", buf, smb2_calc_size(buf)); |
| 218 | #endif | 222 | #endif |
| 219 | } | 223 | } |
| 220 | 224 | ||
| @@ -1002,14 +1006,14 @@ smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1002 | static bool | 1006 | static bool |
| 1003 | smb2_is_status_pending(char *buf, struct TCP_Server_Info *server, int length) | 1007 | smb2_is_status_pending(char *buf, struct TCP_Server_Info *server, int length) |
| 1004 | { | 1008 | { |
| 1005 | struct smb2_hdr *hdr = (struct smb2_hdr *)buf; | 1009 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); |
| 1006 | 1010 | ||
| 1007 | if (hdr->Status != STATUS_PENDING) | 1011 | if (shdr->Status != STATUS_PENDING) |
| 1008 | return false; | 1012 | return false; |
| 1009 | 1013 | ||
| 1010 | if (!length) { | 1014 | if (!length) { |
| 1011 | spin_lock(&server->req_lock); | 1015 | spin_lock(&server->req_lock); |
| 1012 | server->credits += le16_to_cpu(hdr->CreditRequest); | 1016 | server->credits += le16_to_cpu(shdr->CreditRequest); |
| 1013 | spin_unlock(&server->req_lock); | 1017 | spin_unlock(&server->req_lock); |
| 1014 | wake_up(&server->request_q); | 1018 | wake_up(&server->request_q); |
| 1015 | } | 1019 | } |
| @@ -1545,6 +1549,633 @@ smb2_dir_needs_close(struct cifsFileInfo *cfile) | |||
| 1545 | return !cfile->invalidHandle; | 1549 | return !cfile->invalidHandle; |
| 1546 | } | 1550 | } |
| 1547 | 1551 | ||
| 1552 | static void | ||
| 1553 | fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, struct smb_rqst *old_rq) | ||
| 1554 | { | ||
| 1555 | struct smb2_sync_hdr *shdr = | ||
| 1556 | (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base; | ||
| 1557 | unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base); | ||
| 1558 | |||
| 1559 | memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr)); | ||
| 1560 | tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM; | ||
| 1561 | tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len); | ||
| 1562 | tr_hdr->Flags = cpu_to_le16(0x01); | ||
| 1563 | get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CMM_NONCE); | ||
| 1564 | memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8); | ||
| 1565 | inc_rfc1001_len(tr_hdr, sizeof(struct smb2_transform_hdr) - 4); | ||
| 1566 | inc_rfc1001_len(tr_hdr, orig_len); | ||
| 1567 | } | ||
| 1568 | |||
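fill_transform_hdr() above assembles the SMB 3.0 transform header that wraps every encrypted request: the transform protocol id, the size of the original plaintext message, an 11-byte random nonce, Flags = 0x01 to indicate AES-128-CCM, and the session id copied from the plain sync header. A hedged sketch of the on-the-wire layout this implies (field names and widths are assumptions; the authoritative definition is in fs/cifs/smb2pdu.h):

    /* Sketch only -- assumed field widths, chosen to be consistent with the
     * "+ 24" / "sizeof(...) - 24" offsets used by the encryption code below. */
    struct smb2_transform_hdr {
            __be32 smb2_buf_length;         /* RFC1001 length prefix            */
            __le32 ProtocolId;              /* SMB2_TRANSFORM_PROTO_NUM         */
            __u8   Signature[16];           /* AEAD authentication tag          */
            __u8   Nonce[16];               /* first 11 bytes randomized here   */
            __le32 OriginalMessageSize;     /* plaintext SMB2 message length    */
            __le16 Reserved1;
            __le16 Flags;                   /* 0x01 = AES-128-CCM               */
            __le64 SessionId;
    } __packed;                             /* 56 bytes with these widths       */

    /* RFC1001 length written by fill_transform_hdr():
     *   (sizeof(struct smb2_transform_hdr) - 4) + OriginalMessageSize,
     * i.e. the transform header minus its own length field, plus the
     * encrypted payload that follows it. */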
| 1569 | static struct scatterlist * | ||
| 1570 | init_sg(struct smb_rqst *rqst, u8 *sign) | ||
| 1571 | { | ||
| 1572 | unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1; | ||
| 1573 | unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24; | ||
| 1574 | struct scatterlist *sg; | ||
| 1575 | unsigned int i; | ||
| 1576 | unsigned int j; | ||
| 1577 | |||
| 1578 | sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL); | ||
| 1579 | if (!sg) | ||
| 1580 | return NULL; | ||
| 1581 | |||
| 1582 | sg_init_table(sg, sg_len); | ||
| 1583 | sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len); | ||
| 1584 | for (i = 1; i < rqst->rq_nvec; i++) | ||
| 1585 | sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base, | ||
| 1586 | rqst->rq_iov[i].iov_len); | ||
| 1587 | for (j = 0; i < sg_len - 1; i++, j++) { | ||
| 1588 | unsigned int len = (j < rqst->rq_npages - 1) ? rqst->rq_pagesz | ||
| 1589 | : rqst->rq_tailsz; | ||
| 1590 | sg_set_page(&sg[i], rqst->rq_pages[j], len, 0); | ||
| 1591 | } | ||
| 1592 | sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE); | ||
| 1593 | return sg; | ||
| 1594 | } | ||
| 1595 | |||
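init_sg() builds the scatterlist the AEAD cipher walks: entry 0 starts 24 bytes into the transform header (skipping the RFC1001 length, ProtocolId and Signature) and covers the remaining 32 bytes as associated data; the kvecs and pages holding the message body follow; the last entry points at the 16-byte buffer that receives (or supplies) the authentication tag. In short, assuming the header layout sketched above:

    /*
     * sg[0]                     : 32 bytes of transform header (Nonce..SessionId),
     *                             authenticated but not encrypted
     * sg[1 .. nvec-1]           : rqst->rq_iov[1..] -- message body kvecs
     * sg[nvec .. nvec+npages-1] : rqst->rq_pages[]  -- paged payload
     * sg[last]                  : 16-byte signature / authentication tag
     */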
| 1596 | struct cifs_crypt_result { | ||
| 1597 | int err; | ||
| 1598 | struct completion completion; | ||
| 1599 | }; | ||
| 1600 | |||
| 1601 | static void cifs_crypt_complete(struct crypto_async_request *req, int err) | ||
| 1602 | { | ||
| 1603 | struct cifs_crypt_result *res = req->data; | ||
| 1604 | |||
| 1605 | if (err == -EINPROGRESS) | ||
| 1606 | return; | ||
| 1607 | |||
| 1608 | res->err = err; | ||
| 1609 | complete(&res->completion); | ||
| 1610 | } | ||
| 1611 | |||
| 1612 | /* | ||
| 1613 | * Encrypt or decrypt @rqst message. @rqst has the following format: | ||
| 1614 | * iov[0] - transform header (associate data), | ||
| 1615 | * iov[1-N] and pages - data to encrypt. | ||
| 1616 | * On success return encrypted data in iov[1-N] and pages, leave iov[0] | ||
| 1617 | * untouched. | ||
| 1618 | */ | ||
| 1619 | static int | ||
| 1620 | crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) | ||
| 1621 | { | ||
| 1622 | struct smb2_transform_hdr *tr_hdr = | ||
| 1623 | (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base; | ||
| 1624 | unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24; | ||
| 1625 | struct cifs_ses *ses; | ||
| 1626 | int rc = 0; | ||
| 1627 | struct scatterlist *sg; | ||
| 1628 | u8 sign[SMB2_SIGNATURE_SIZE] = {}; | ||
| 1629 | struct aead_request *req; | ||
| 1630 | char *iv; | ||
| 1631 | unsigned int iv_len; | ||
| 1632 | struct cifs_crypt_result result = {0, }; | ||
| 1633 | struct crypto_aead *tfm; | ||
| 1634 | unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); | ||
| 1635 | |||
| 1636 | init_completion(&result.completion); | ||
| 1637 | |||
| 1638 | ses = smb2_find_smb_ses(server, tr_hdr->SessionId); | ||
| 1639 | if (!ses) { | ||
| 1640 | cifs_dbg(VFS, "%s: Could not find session\n", __func__); | ||
| 1641 | return 0; | ||
| 1642 | } | ||
| 1643 | |||
| 1644 | rc = smb3_crypto_aead_allocate(server); | ||
| 1645 | if (rc) { | ||
| 1646 | cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__); | ||
| 1647 | return rc; | ||
| 1648 | } | ||
| 1649 | |||
| 1650 | tfm = enc ? server->secmech.ccmaesencrypt : | ||
| 1651 | server->secmech.ccmaesdecrypt; | ||
| 1652 | rc = crypto_aead_setkey(tfm, enc ? ses->smb3encryptionkey : | ||
| 1653 | ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE); | ||
| 1654 | if (rc) { | ||
| 1655 | cifs_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc); | ||
| 1656 | return rc; | ||
| 1657 | } | ||
| 1658 | |||
| 1659 | rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE); | ||
| 1660 | if (rc) { | ||
| 1661 | cifs_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc); | ||
| 1662 | return rc; | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | req = aead_request_alloc(tfm, GFP_KERNEL); | ||
| 1666 | if (!req) { | ||
| 1667 | cifs_dbg(VFS, "%s: Failed to alloc aead request", __func__); | ||
| 1668 | return -ENOMEM; | ||
| 1669 | } | ||
| 1670 | |||
| 1671 | if (!enc) { | ||
| 1672 | memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE); | ||
| 1673 | crypt_len += SMB2_SIGNATURE_SIZE; | ||
| 1674 | } | ||
| 1675 | |||
| 1676 | sg = init_sg(rqst, sign); | ||
| 1677 | if (!sg) { | ||
| 1678 | cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); | ||
| 1679 | goto free_req; | ||
| 1680 | } | ||
| 1681 | |||
| 1682 | iv_len = crypto_aead_ivsize(tfm); | ||
| 1683 | iv = kzalloc(iv_len, GFP_KERNEL); | ||
| 1684 | if (!iv) { | ||
| 1685 | cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); | ||
| 1686 | goto free_sg; | ||
| 1687 | } | ||
| 1688 | iv[0] = 3; | ||
| 1689 | memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CMM_NONCE); | ||
| 1690 | |||
| 1691 | aead_request_set_crypt(req, sg, sg, crypt_len, iv); | ||
| 1692 | aead_request_set_ad(req, assoc_data_len); | ||
| 1693 | |||
| 1694 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
| 1695 | cifs_crypt_complete, &result); | ||
| 1696 | |||
| 1697 | rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | ||
| 1698 | |||
| 1699 | if (rc == -EINPROGRESS || rc == -EBUSY) { | ||
| 1700 | wait_for_completion(&result.completion); | ||
| 1701 | rc = result.err; | ||
| 1702 | } | ||
| 1703 | |||
| 1704 | if (!rc && enc) | ||
| 1705 | memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); | ||
| 1706 | |||
| 1707 | kfree(iv); | ||
| 1708 | free_sg: | ||
| 1709 | kfree(sg); | ||
| 1710 | free_req: | ||
| 1711 | kfree(req); | ||
| 1712 | return rc; | ||
| 1713 | } | ||
| 1714 | |||
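crypt_message() drives the AES-128-CCM transform through the kernel AEAD API. The IV it builds is crypto_aead_ivsize() bytes (16 for CCM); CCM stores the length-field size in the first byte as L - 1, so iv[0] = 3 selects a 4-byte length field and leaves 11 bytes for the nonce copied from the transform header. A stand-alone sketch of that IV construction (plain C, no kernel APIs; SMB3_AES128CMM_NONCE taken to be 11 as used above):

    #include <stdint.h>
    #include <string.h>

    #define SMB3_AES128CMM_NONCE 11  /* random nonce bytes in the transform header */

    /* Build a 16-byte CCM IV the way crypt_message() does: byte 0 holds
     * L - 1 (3, i.e. a 4-byte message-length field), bytes 1..11 hold the
     * nonce, and the trailing bytes stay zero for the CCM counter. */
    static void build_ccm_iv(uint8_t iv[16], const uint8_t *nonce)
    {
            memset(iv, 0, 16);
            iv[0] = 3;
            memcpy(iv + 1, nonce, SMB3_AES128CMM_NONCE);
    }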
| 1715 | static int | ||
| 1716 | smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq, | ||
| 1717 | struct smb_rqst *old_rq) | ||
| 1718 | { | ||
| 1719 | struct kvec *iov; | ||
| 1720 | struct page **pages; | ||
| 1721 | struct smb2_transform_hdr *tr_hdr; | ||
| 1722 | unsigned int npages = old_rq->rq_npages; | ||
| 1723 | int i; | ||
| 1724 | int rc = -ENOMEM; | ||
| 1725 | |||
| 1726 | pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); | ||
| 1727 | if (!pages) | ||
| 1728 | return rc; | ||
| 1729 | |||
| 1730 | new_rq->rq_pages = pages; | ||
| 1731 | new_rq->rq_npages = old_rq->rq_npages; | ||
| 1732 | new_rq->rq_pagesz = old_rq->rq_pagesz; | ||
| 1733 | new_rq->rq_tailsz = old_rq->rq_tailsz; | ||
| 1734 | |||
| 1735 | for (i = 0; i < npages; i++) { | ||
| 1736 | pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); | ||
| 1737 | if (!pages[i]) | ||
| 1738 | goto err_free_pages; | ||
| 1739 | } | ||
| 1740 | |||
| 1741 | iov = kmalloc_array(old_rq->rq_nvec, sizeof(struct kvec), GFP_KERNEL); | ||
| 1742 | if (!iov) | ||
| 1743 | goto err_free_pages; | ||
| 1744 | |||
| 1745 | /* copy all iovs from the old except the 1st one (rfc1002 length) */ | ||
| 1746 | memcpy(&iov[1], &old_rq->rq_iov[1], | ||
| 1747 | sizeof(struct kvec) * (old_rq->rq_nvec - 1)); | ||
| 1748 | new_rq->rq_iov = iov; | ||
| 1749 | new_rq->rq_nvec = old_rq->rq_nvec; | ||
| 1750 | |||
| 1751 | tr_hdr = kmalloc(sizeof(struct smb2_transform_hdr), GFP_KERNEL); | ||
| 1752 | if (!tr_hdr) | ||
| 1753 | goto err_free_iov; | ||
| 1754 | |||
| 1755 | /* fill the 1st iov with a transform header */ | ||
| 1756 | fill_transform_hdr(tr_hdr, old_rq); | ||
| 1757 | new_rq->rq_iov[0].iov_base = tr_hdr; | ||
| 1758 | new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr); | ||
| 1759 | |||
| 1760 | /* copy pages from the old */ | ||
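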
| 1761 | for (i = 0; i < npages; i++) { | ||
| 1762 | char *dst = kmap(new_rq->rq_pages[i]); | ||
| 1763 | char *src = kmap(old_rq->rq_pages[i]); | ||
| 1764 | unsigned int len = (i < npages - 1) ? new_rq->rq_pagesz : | ||
| 1765 | new_rq->rq_tailsz; | ||
| 1766 | memcpy(dst, src, len); | ||
| 1767 | kunmap(new_rq->rq_pages[i]); | ||
| 1768 | kunmap(old_rq->rq_pages[i]); | ||
| 1769 | } | ||
| 1770 | |||
| 1771 | rc = crypt_message(server, new_rq, 1); | ||
| 1772 | cifs_dbg(FYI, "encrypt message returned %d", rc); | ||
| 1773 | if (rc) | ||
| 1774 | goto err_free_tr_hdr; | ||
| 1775 | |||
| 1776 | return rc; | ||
| 1777 | |||
| 1778 | err_free_tr_hdr: | ||
| 1779 | kfree(tr_hdr); | ||
| 1780 | err_free_iov: | ||
| 1781 | kfree(iov); | ||
| 1782 | err_free_pages: | ||
| 1783 | for (i = i - 1; i >= 0; i--) | ||
| 1784 | put_page(pages[i]); | ||
| 1785 | kfree(pages); | ||
| 1786 | return rc; | ||
| 1787 | } | ||
| 1788 | |||
| 1789 | static void | ||
| 1790 | smb3_free_transform_rq(struct smb_rqst *rqst) | ||
| 1791 | { | ||
| 1792 | int i = rqst->rq_npages - 1; | ||
| 1793 | |||
| 1794 | for (; i >= 0; i--) | ||
| 1795 | put_page(rqst->rq_pages[i]); | ||
| 1796 | kfree(rqst->rq_pages); | ||
| 1797 | /* free transform header */ | ||
| 1798 | kfree(rqst->rq_iov[0].iov_base); | ||
| 1799 | kfree(rqst->rq_iov); | ||
| 1800 | } | ||
| 1801 | |||
| 1802 | static int | ||
| 1803 | smb3_is_transform_hdr(void *buf) | ||
| 1804 | { | ||
| 1805 | struct smb2_transform_hdr *trhdr = buf; | ||
| 1806 | |||
| 1807 | return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM; | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | static int | ||
| 1811 | decrypt_raw_data(struct TCP_Server_Info *server, char *buf, | ||
| 1812 | unsigned int buf_data_size, struct page **pages, | ||
| 1813 | unsigned int npages, unsigned int page_data_size) | ||
| 1814 | { | ||
| 1815 | struct kvec iov[2]; | ||
| 1816 | struct smb_rqst rqst = {NULL}; | ||
| 1817 | struct smb2_hdr *hdr; | ||
| 1818 | int rc; | ||
| 1819 | |||
| 1820 | iov[0].iov_base = buf; | ||
| 1821 | iov[0].iov_len = sizeof(struct smb2_transform_hdr); | ||
| 1822 | iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr); | ||
| 1823 | iov[1].iov_len = buf_data_size; | ||
| 1824 | |||
| 1825 | rqst.rq_iov = iov; | ||
| 1826 | rqst.rq_nvec = 2; | ||
| 1827 | rqst.rq_pages = pages; | ||
| 1828 | rqst.rq_npages = npages; | ||
| 1829 | rqst.rq_pagesz = PAGE_SIZE; | ||
| 1830 | rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE; | ||
| 1831 | |||
| 1832 | rc = crypt_message(server, &rqst, 0); | ||
| 1833 | cifs_dbg(FYI, "decrypt message returned %d\n", rc); | ||
| 1834 | |||
| 1835 | if (rc) | ||
| 1836 | return rc; | ||
| 1837 | |||
| 1838 | memmove(buf + 4, iov[1].iov_base, buf_data_size); | ||
| 1839 | hdr = (struct smb2_hdr *)buf; | ||
| 1840 | hdr->smb2_buf_length = cpu_to_be32(buf_data_size + page_data_size); | ||
| 1841 | server->total_read = buf_data_size + page_data_size + 4; | ||
| 1842 | |||
| 1843 | return rc; | ||
| 1844 | } | ||
| 1845 | |||
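decrypt_raw_data() runs crypt_message() in the decrypt direction and then rebuilds a plaintext-looking frame so the rest of the demultiplex code needs no changes: the decrypted SMB2 message is slid down to offset 4 (right after the RFC1001 length field, overwriting the transform header) and smb2_buf_length is rewritten to the full decrypted size. Roughly, with buf_data_size and page_data_size as in the function above:

    /* Sketch of the post-decrypt fix-up, not the kernel code itself:
     *
     *   before: [rfc1001 len][ transform header ][ decrypted SMB2 message ... ]
     *   after : [rfc1001 len][ decrypted SMB2 message ...................... ]
     *
     * memmove(buf + 4, buf + sizeof(struct smb2_transform_hdr), buf_data_size);
     * ((struct smb2_hdr *)buf)->smb2_buf_length =
     *                 cpu_to_be32(buf_data_size + page_data_size);
     */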
| 1846 | static int | ||
| 1847 | read_data_into_pages(struct TCP_Server_Info *server, struct page **pages, | ||
| 1848 | unsigned int npages, unsigned int len) | ||
| 1849 | { | ||
| 1850 | int i; | ||
| 1851 | int length; | ||
| 1852 | |||
| 1853 | for (i = 0; i < npages; i++) { | ||
| 1854 | struct page *page = pages[i]; | ||
| 1855 | size_t n; | ||
| 1856 | |||
| 1857 | n = len; | ||
| 1858 | if (len >= PAGE_SIZE) { | ||
| 1859 | /* enough data to fill the page */ | ||
| 1860 | n = PAGE_SIZE; | ||
| 1861 | len -= n; | ||
| 1862 | } else { | ||
| 1863 | zero_user(page, len, PAGE_SIZE - len); | ||
| 1864 | len = 0; | ||
| 1865 | } | ||
| 1866 | length = cifs_read_page_from_socket(server, page, n); | ||
| 1867 | if (length < 0) | ||
| 1868 | return length; | ||
| 1869 | server->total_read += length; | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | return 0; | ||
| 1873 | } | ||
| 1874 | |||
| 1875 | static int | ||
| 1876 | init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size, | ||
| 1877 | unsigned int cur_off, struct bio_vec **page_vec) | ||
| 1878 | { | ||
| 1879 | struct bio_vec *bvec; | ||
| 1880 | int i; | ||
| 1881 | |||
| 1882 | bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL); | ||
| 1883 | if (!bvec) | ||
| 1884 | return -ENOMEM; | ||
| 1885 | |||
| 1886 | for (i = 0; i < npages; i++) { | ||
| 1887 | bvec[i].bv_page = pages[i]; | ||
| 1888 | bvec[i].bv_offset = (i == 0) ? cur_off : 0; | ||
| 1889 | bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size); | ||
| 1890 | data_size -= bvec[i].bv_len; | ||
| 1891 | } | ||
| 1892 | |||
| 1893 | if (data_size != 0) { | ||
| 1894 | cifs_dbg(VFS, "%s: something went wrong\n", __func__); | ||
| 1895 | kfree(bvec); | ||
| 1896 | return -EIO; | ||
| 1897 | } | ||
| 1898 | |||
| 1899 | *page_vec = bvec; | ||
| 1900 | return 0; | ||
| 1901 | } | ||
| 1902 | |||
| 1903 | static int | ||
| 1904 | handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | ||
| 1905 | char *buf, unsigned int buf_len, struct page **pages, | ||
| 1906 | unsigned int npages, unsigned int page_data_size) | ||
| 1907 | { | ||
| 1908 | unsigned int data_offset; | ||
| 1909 | unsigned int data_len; | ||
| 1910 | unsigned int cur_off; | ||
| 1911 | unsigned int cur_page_idx; | ||
| 1912 | unsigned int pad_len; | ||
| 1913 | struct cifs_readdata *rdata = mid->callback_data; | ||
| 1914 | struct smb2_sync_hdr *shdr = get_sync_hdr(buf); | ||
| 1915 | struct bio_vec *bvec = NULL; | ||
| 1916 | struct iov_iter iter; | ||
| 1917 | struct kvec iov; | ||
| 1918 | int length; | ||
| 1919 | |||
| 1920 | if (shdr->Command != SMB2_READ) { | ||
| 1921 | cifs_dbg(VFS, "only big read responses are supported\n"); | ||
| 1922 | return -ENOTSUPP; | ||
| 1923 | } | ||
| 1924 | |||
| 1925 | if (server->ops->is_status_pending && | ||
| 1926 | server->ops->is_status_pending(buf, server, 0)) | ||
| 1927 | return -1; | ||
| 1928 | |||
| 1929 | rdata->result = server->ops->map_error(buf, false); | ||
| 1930 | if (rdata->result != 0) { | ||
| 1931 | cifs_dbg(FYI, "%s: server returned error %d\n", | ||
| 1932 | __func__, rdata->result); | ||
| 1933 | dequeue_mid(mid, rdata->result); | ||
| 1934 | return 0; | ||
| 1935 | } | ||
| 1936 | |||
| 1937 | data_offset = server->ops->read_data_offset(buf) + 4; | ||
| 1938 | data_len = server->ops->read_data_length(buf); | ||
| 1939 | |||
| 1940 | if (data_offset < server->vals->read_rsp_size) { | ||
| 1941 | /* | ||
| 1942 | * win2k8 sometimes sends an offset of 0 when the read | ||
| 1943 | * is beyond the EOF. Treat it as if the data starts just after | ||
| 1944 | * the header. | ||
| 1945 | */ | ||
| 1946 | cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", | ||
| 1947 | __func__, data_offset); | ||
| 1948 | data_offset = server->vals->read_rsp_size; | ||
| 1949 | } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { | ||
| 1950 | /* data_offset is beyond the end of smallbuf */ | ||
| 1951 | cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", | ||
| 1952 | __func__, data_offset); | ||
| 1953 | rdata->result = -EIO; | ||
| 1954 | dequeue_mid(mid, rdata->result); | ||
| 1955 | return 0; | ||
| 1956 | } | ||
| 1957 | |||
| 1958 | pad_len = data_offset - server->vals->read_rsp_size; | ||
| 1959 | |||
| 1960 | if (buf_len <= data_offset) { | ||
| 1961 | /* read response payload is in pages */ | ||
| 1962 | cur_page_idx = pad_len / PAGE_SIZE; | ||
| 1963 | cur_off = pad_len % PAGE_SIZE; | ||
| 1964 | |||
| 1965 | if (cur_page_idx != 0) { | ||
| 1966 | /* data offset is beyond the 1st page of response */ | ||
| 1967 | cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n", | ||
| 1968 | __func__, data_offset); | ||
| 1969 | rdata->result = -EIO; | ||
| 1970 | dequeue_mid(mid, rdata->result); | ||
| 1971 | return 0; | ||
| 1972 | } | ||
| 1973 | |||
| 1974 | if (data_len > page_data_size - pad_len) { | ||
| 1975 | /* data_len is corrupt -- discard frame */ | ||
| 1976 | rdata->result = -EIO; | ||
| 1977 | dequeue_mid(mid, rdata->result); | ||
| 1978 | return 0; | ||
| 1979 | } | ||
| 1980 | |||
| 1981 | rdata->result = init_read_bvec(pages, npages, page_data_size, | ||
| 1982 | cur_off, &bvec); | ||
| 1983 | if (rdata->result != 0) { | ||
| 1984 | dequeue_mid(mid, rdata->result); | ||
| 1985 | return 0; | ||
| 1986 | } | ||
| 1987 | |||
| 1988 | iov_iter_bvec(&iter, WRITE | ITER_BVEC, bvec, npages, data_len); | ||
| 1989 | } else if (buf_len >= data_offset + data_len) { | ||
| 1990 | /* read response payload is in buf */ | ||
| 1991 | WARN_ONCE(npages > 0, "read data can be either in buf or in pages"); | ||
| 1992 | iov.iov_base = buf + data_offset; | ||
| 1993 | iov.iov_len = data_len; | ||
| 1994 | iov_iter_kvec(&iter, WRITE | ITER_KVEC, &iov, 1, data_len); | ||
| 1995 | } else { | ||
| 1996 | /* read response payload cannot be in both buf and pages */ | ||
| 1997 | WARN_ONCE(1, "buf can not contain only a part of read data"); | ||
| 1998 | rdata->result = -EIO; | ||
| 1999 | dequeue_mid(mid, rdata->result); | ||
| 2000 | return 0; | ||
| 2001 | } | ||
| 2002 | |||
| 2003 | /* set up first iov for signature check */ | ||
| 2004 | rdata->iov[0].iov_base = buf; | ||
| 2005 | rdata->iov[0].iov_len = 4; | ||
| 2006 | rdata->iov[1].iov_base = buf + 4; | ||
| 2007 | rdata->iov[1].iov_len = server->vals->read_rsp_size - 4; | ||
| 2008 | cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
| 2009 | rdata->iov[0].iov_base, server->vals->read_rsp_size); | ||
| 2010 | |||
| 2011 | length = rdata->copy_into_pages(server, rdata, &iter); | ||
| 2012 | |||
| 2013 | kfree(bvec); | ||
| 2014 | |||
| 2015 | if (length < 0) | ||
| 2016 | return length; | ||
| 2017 | |||
| 2018 | dequeue_mid(mid, false); | ||
| 2019 | return length; | ||
| 2020 | } | ||
| 2021 | |||
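handle_read_data() must cope with the read payload living either in the preallocated pages (large, possibly decrypted reads) or entirely inside buf (responses small enough to fit the header buffer); a payload split across both is treated as corruption. The placement decision is a simple range check, sketched here with the same variable names:

    enum read_payload_loc { PAYLOAD_IN_PAGES, PAYLOAD_IN_BUF, PAYLOAD_BROKEN };

    /* Stand-alone sketch of the decision made by handle_read_data(). */
    static enum read_payload_loc
    locate_read_payload(unsigned int buf_len, unsigned int data_offset,
                        unsigned int data_len)
    {
            if (buf_len <= data_offset)
                    return PAYLOAD_IN_PAGES;  /* payload was read into pages    */
            if (buf_len >= data_offset + data_len)
                    return PAYLOAD_IN_BUF;    /* payload sits wholly inside buf */
            return PAYLOAD_BROKEN;            /* partial payload in buf: -EIO   */
    }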
| 2022 | static int | ||
| 2023 | receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid) | ||
| 2024 | { | ||
| 2025 | char *buf = server->smallbuf; | ||
| 2026 | struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; | ||
| 2027 | unsigned int npages; | ||
| 2028 | struct page **pages; | ||
| 2029 | unsigned int len; | ||
| 2030 | unsigned int buflen = get_rfc1002_length(buf) + 4; | ||
| 2031 | int rc; | ||
| 2032 | int i = 0; | ||
| 2033 | |||
| 2034 | len = min_t(unsigned int, buflen, server->vals->read_rsp_size - 4 + | ||
| 2035 | sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1; | ||
| 2036 | |||
| 2037 | rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len); | ||
| 2038 | if (rc < 0) | ||
| 2039 | return rc; | ||
| 2040 | server->total_read += rc; | ||
| 2041 | |||
| 2042 | len = le32_to_cpu(tr_hdr->OriginalMessageSize) + 4 - | ||
| 2043 | server->vals->read_rsp_size; | ||
| 2044 | npages = DIV_ROUND_UP(len, PAGE_SIZE); | ||
| 2045 | |||
| 2046 | pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); | ||
| 2047 | if (!pages) { | ||
| 2048 | rc = -ENOMEM; | ||
| 2049 | goto discard_data; | ||
| 2050 | } | ||
| 2051 | |||
| 2052 | for (; i < npages; i++) { | ||
| 2053 | pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM); | ||
| 2054 | if (!pages[i]) { | ||
| 2055 | rc = -ENOMEM; | ||
| 2056 | goto discard_data; | ||
| 2057 | } | ||
| 2058 | } | ||
| 2059 | |||
| 2060 | /* read the read-response data into pages */ | ||
| 2061 | rc = read_data_into_pages(server, pages, npages, len); | ||
| 2062 | if (rc) | ||
| 2063 | goto free_pages; | ||
| 2064 | |||
| 2065 | rc = cifs_discard_remaining_data(server); | ||
| 2066 | if (rc) | ||
| 2067 | goto free_pages; | ||
| 2068 | |||
| 2069 | rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size - 4, | ||
| 2070 | pages, npages, len); | ||
| 2071 | if (rc) | ||
| 2072 | goto free_pages; | ||
| 2073 | |||
| 2074 | *mid = smb2_find_mid(server, buf); | ||
| 2075 | if (*mid == NULL) | ||
| 2076 | cifs_dbg(FYI, "mid not found\n"); | ||
| 2077 | else { | ||
| 2078 | cifs_dbg(FYI, "mid found\n"); | ||
| 2079 | (*mid)->decrypted = true; | ||
| 2080 | rc = handle_read_data(server, *mid, buf, | ||
| 2081 | server->vals->read_rsp_size, | ||
| 2082 | pages, npages, len); | ||
| 2083 | } | ||
| 2084 | |||
| 2085 | free_pages: | ||
| 2086 | for (i = i - 1; i >= 0; i--) | ||
| 2087 | put_page(pages[i]); | ||
| 2088 | kfree(pages); | ||
| 2089 | return rc; | ||
| 2090 | discard_data: | ||
| 2091 | cifs_discard_remaining_data(server); | ||
| 2092 | goto free_pages; | ||
| 2093 | } | ||
| 2094 | |||
| 2095 | static int | ||
| 2096 | receive_encrypted_standard(struct TCP_Server_Info *server, | ||
| 2097 | struct mid_q_entry **mid) | ||
| 2098 | { | ||
| 2099 | int length; | ||
| 2100 | char *buf = server->smallbuf; | ||
| 2101 | unsigned int pdu_length = get_rfc1002_length(buf); | ||
| 2102 | unsigned int buf_size; | ||
| 2103 | struct mid_q_entry *mid_entry; | ||
| 2104 | |||
| 2105 | /* switch to large buffer if too big for a small one */ | ||
| 2106 | if (pdu_length + 4 > MAX_CIFS_SMALL_BUFFER_SIZE) { | ||
| 2107 | server->large_buf = true; | ||
| 2108 | memcpy(server->bigbuf, buf, server->total_read); | ||
| 2109 | buf = server->bigbuf; | ||
| 2110 | } | ||
| 2111 | |||
| 2112 | /* now read the rest */ | ||
| 2113 | length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, | ||
| 2114 | pdu_length - HEADER_SIZE(server) + 1 + 4); | ||
| 2115 | if (length < 0) | ||
| 2116 | return length; | ||
| 2117 | server->total_read += length; | ||
| 2118 | |||
| 2119 | buf_size = pdu_length + 4 - sizeof(struct smb2_transform_hdr); | ||
| 2120 | length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0); | ||
| 2121 | if (length) | ||
| 2122 | return length; | ||
| 2123 | |||
| 2124 | mid_entry = smb2_find_mid(server, buf); | ||
| 2125 | if (mid_entry == NULL) | ||
| 2126 | cifs_dbg(FYI, "mid not found\n"); | ||
| 2127 | else { | ||
| 2128 | cifs_dbg(FYI, "mid found\n"); | ||
| 2129 | mid_entry->decrypted = true; | ||
| 2130 | } | ||
| 2131 | |||
| 2132 | *mid = mid_entry; | ||
| 2133 | |||
| 2134 | if (mid_entry && mid_entry->handle) | ||
| 2135 | return mid_entry->handle(server, mid_entry); | ||
| 2136 | |||
| 2137 | return cifs_handle_standard(server, mid_entry); | ||
| 2138 | } | ||
| 2139 | |||
| 2140 | static int | ||
| 2141 | smb3_receive_transform(struct TCP_Server_Info *server, struct mid_q_entry **mid) | ||
| 2142 | { | ||
| 2143 | char *buf = server->smallbuf; | ||
| 2144 | unsigned int pdu_length = get_rfc1002_length(buf); | ||
| 2145 | struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; | ||
| 2146 | unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize); | ||
| 2147 | |||
| 2148 | if (pdu_length + 4 < sizeof(struct smb2_transform_hdr) + | ||
| 2149 | sizeof(struct smb2_sync_hdr)) { | ||
| 2150 | cifs_dbg(VFS, "Transform message is too small (%u)\n", | ||
| 2151 | pdu_length); | ||
| 2152 | cifs_reconnect(server); | ||
| 2153 | wake_up(&server->response_q); | ||
| 2154 | return -ECONNABORTED; | ||
| 2155 | } | ||
| 2156 | |||
| 2157 | if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) { | ||
| 2158 | cifs_dbg(VFS, "Transform message is broken\n"); | ||
| 2159 | cifs_reconnect(server); | ||
| 2160 | wake_up(&server->response_q); | ||
| 2161 | return -ECONNABORTED; | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | if (pdu_length + 4 > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) | ||
| 2165 | return receive_encrypted_read(server, mid); | ||
| 2166 | |||
| 2167 | return receive_encrypted_standard(server, mid); | ||
| 2168 | } | ||
| 2169 | |||
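smb3_receive_transform() is the receive-side entry point for frames that carry the transform protocol id. It drops frames too short to hold a transform header plus a sync header, drops frames whose RFC1001 length is smaller than the advertised OriginalMessageSize plus the transform header, and then routes oversized frames (bigger than any normal buffered response) to the paged read path and everything else to the in-buffer path. The two sanity checks condense to:

    /* Sketch of the size checks, not the kernel code itself. */
    static int transform_frame_plausible(unsigned int pdu_length,
                                         unsigned int orig_len,
                                         size_t tfm_hdr_size,
                                         size_t sync_hdr_size)
    {
            if (pdu_length + 4 < tfm_hdr_size + sync_hdr_size)
                    return 0;       /* cannot even hold the two headers */
            if (pdu_length + 4 < orig_len + tfm_hdr_size)
                    return 0;       /* shorter than the claimed payload */
            return 1;
    }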
| 2170 | int | ||
| 2171 | smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
| 2172 | { | ||
| 2173 | char *buf = server->large_buf ? server->bigbuf : server->smallbuf; | ||
| 2174 | |||
| 2175 | return handle_read_data(server, mid, buf, get_rfc1002_length(buf) + 4, | ||
| 2176 | NULL, 0, 0); | ||
| 2177 | } | ||
| 2178 | |||
| 1548 | struct smb_version_operations smb20_operations = { | 2179 | struct smb_version_operations smb20_operations = { |
| 1549 | .compare_fids = smb2_compare_fids, | 2180 | .compare_fids = smb2_compare_fids, |
| 1550 | .setup_request = smb2_setup_request, | 2181 | .setup_request = smb2_setup_request, |
| @@ -1791,6 +2422,10 @@ struct smb_version_operations smb30_operations = { | |||
| 1791 | .dir_needs_close = smb2_dir_needs_close, | 2422 | .dir_needs_close = smb2_dir_needs_close, |
| 1792 | .fallocate = smb3_fallocate, | 2423 | .fallocate = smb3_fallocate, |
| 1793 | .enum_snapshots = smb3_enum_snapshots, | 2424 | .enum_snapshots = smb3_enum_snapshots, |
| 2425 | .init_transform_rq = smb3_init_transform_rq, | ||
| 2426 | .free_transform_rq = smb3_free_transform_rq, | ||
| 2427 | .is_transform_hdr = smb3_is_transform_hdr, | ||
| 2428 | .receive_transform = smb3_receive_transform, | ||
| 1794 | }; | 2429 | }; |
| 1795 | 2430 | ||
| 1796 | #ifdef CONFIG_CIFS_SMB311 | 2431 | #ifdef CONFIG_CIFS_SMB311 |
| @@ -1879,6 +2514,10 @@ struct smb_version_operations smb311_operations = { | |||
| 1879 | .dir_needs_close = smb2_dir_needs_close, | 2514 | .dir_needs_close = smb2_dir_needs_close, |
| 1880 | .fallocate = smb3_fallocate, | 2515 | .fallocate = smb3_fallocate, |
| 1881 | .enum_snapshots = smb3_enum_snapshots, | 2516 | .enum_snapshots = smb3_enum_snapshots, |
| 2517 | .init_transform_rq = smb3_init_transform_rq, | ||
| 2518 | .free_transform_rq = smb3_free_transform_rq, | ||
| 2519 | .is_transform_hdr = smb3_is_transform_hdr, | ||
| 2520 | .receive_transform = smb3_receive_transform, | ||
| 1882 | }; | 2521 | }; |
| 1883 | #endif /* CIFS_SMB311 */ | 2522 | #endif /* CIFS_SMB311 */ |
| 1884 | 2523 | ||
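Registering the four new hooks in the SMB 3.0 and 3.1.1 operation tables is what actually switches those dialects over to the encryption path. The call sites live outside this diff, so the following is only a hedged sketch of how the hooks are expected to fit together:

    /* Assumed usage, based on the CIFS_TRANSFORM_REQ flag introduced below:
     *
     * send side:
     *   if (flags & CIFS_TRANSFORM_REQ) {
     *           rc = server->ops->init_transform_rq(server, &new_rq, &rqst);
     *           ... transmit new_rq instead of rqst ...
     *           server->ops->free_transform_rq(&new_rq);
     *   }
     *
     * receive side:
     *   if (server->ops->is_transform_hdr(buf))
     *           length = server->ops->receive_transform(server, &mid);
     */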
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 87457227812c..ad83b3db2840 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
| @@ -77,45 +77,42 @@ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { | |||
| 77 | /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */ | 77 | /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */ |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | static int encryption_required(const struct cifs_tcon *tcon) | ||
| 81 | { | ||
| 82 | if (!tcon) | ||
| 83 | return 0; | ||
| 84 | if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) || | ||
| 85 | (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)) | ||
| 86 | return 1; | ||
| 87 | if (tcon->seal && | ||
| 88 | (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) | ||
| 89 | return 1; | ||
| 90 | return 0; | ||
| 91 | } | ||
| 80 | 92 | ||
| 81 | static void | 93 | static void |
| 82 | smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , | 94 | smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd, |
| 83 | const struct cifs_tcon *tcon) | 95 | const struct cifs_tcon *tcon) |
| 84 | { | 96 | { |
| 85 | struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; | 97 | shdr->ProtocolId = SMB2_PROTO_NUMBER; |
| 86 | char *temp = (char *)hdr; | 98 | shdr->StructureSize = cpu_to_le16(64); |
| 87 | /* lookup word count ie StructureSize from table */ | 99 | shdr->Command = smb2_cmd; |
| 88 | __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)]; | ||
| 89 | |||
| 90 | /* | ||
| 91 | * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of | ||
| 92 | * largest operations (Create) | ||
| 93 | */ | ||
| 94 | memset(temp, 0, 256); | ||
| 95 | |||
| 96 | /* Note this is only network field converted to big endian */ | ||
| 97 | hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr) | ||
| 98 | - 4 /* RFC 1001 length field itself not counted */); | ||
| 99 | |||
| 100 | hdr->ProtocolId = SMB2_PROTO_NUMBER; | ||
| 101 | hdr->StructureSize = cpu_to_le16(64); | ||
| 102 | hdr->Command = smb2_cmd; | ||
| 103 | if (tcon && tcon->ses && tcon->ses->server) { | 100 | if (tcon && tcon->ses && tcon->ses->server) { |
| 104 | struct TCP_Server_Info *server = tcon->ses->server; | 101 | struct TCP_Server_Info *server = tcon->ses->server; |
| 105 | 102 | ||
| 106 | spin_lock(&server->req_lock); | 103 | spin_lock(&server->req_lock); |
| 107 | /* Request up to 2 credits but don't go over the limit. */ | 104 | /* Request up to 2 credits but don't go over the limit. */ |
| 108 | if (server->credits >= server->max_credits) | 105 | if (server->credits >= server->max_credits) |
| 109 | hdr->CreditRequest = cpu_to_le16(0); | 106 | shdr->CreditRequest = cpu_to_le16(0); |
| 110 | else | 107 | else |
| 111 | hdr->CreditRequest = cpu_to_le16( | 108 | shdr->CreditRequest = cpu_to_le16( |
| 112 | min_t(int, server->max_credits - | 109 | min_t(int, server->max_credits - |
| 113 | server->credits, 2)); | 110 | server->credits, 2)); |
| 114 | spin_unlock(&server->req_lock); | 111 | spin_unlock(&server->req_lock); |
| 115 | } else { | 112 | } else { |
| 116 | hdr->CreditRequest = cpu_to_le16(2); | 113 | shdr->CreditRequest = cpu_to_le16(2); |
| 117 | } | 114 | } |
| 118 | hdr->ProcessId = cpu_to_le32((__u16)current->tgid); | 115 | shdr->ProcessId = cpu_to_le32((__u16)current->tgid); |
| 119 | 116 | ||
| 120 | if (!tcon) | 117 | if (!tcon) |
| 121 | goto out; | 118 | goto out; |
| @@ -124,13 +121,13 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , | |||
| 124 | /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ | 121 | /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ |
| 125 | if ((tcon->ses) && (tcon->ses->server) && | 122 | if ((tcon->ses) && (tcon->ses->server) && |
| 126 | (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) | 123 | (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) |
| 127 | hdr->CreditCharge = cpu_to_le16(1); | 124 | shdr->CreditCharge = cpu_to_le16(1); |
| 128 | /* else CreditCharge MBZ */ | 125 | /* else CreditCharge MBZ */ |
| 129 | 126 | ||
| 130 | hdr->TreeId = tcon->tid; | 127 | shdr->TreeId = tcon->tid; |
| 131 | /* Uid is not converted */ | 128 | /* Uid is not converted */ |
| 132 | if (tcon->ses) | 129 | if (tcon->ses) |
| 133 | hdr->SessionId = tcon->ses->Suid; | 130 | shdr->SessionId = tcon->ses->Suid; |
| 134 | 131 | ||
| 135 | /* | 132 | /* |
| 136 | * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have | 133 | * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have |
| @@ -143,12 +140,12 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , | |||
| 143 | * but it is safer to not set it for now. | 140 | * but it is safer to not set it for now. |
| 144 | */ | 141 | */ |
| 145 | /* if (tcon->share_flags & SHI1005_FLAGS_DFS) | 142 | /* if (tcon->share_flags & SHI1005_FLAGS_DFS) |
| 146 | hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ | 143 | shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */ |
| 147 | 144 | ||
| 148 | if (tcon->ses && tcon->ses->server && tcon->ses->server->sign) | 145 | if (tcon->ses && tcon->ses->server && tcon->ses->server->sign && |
| 149 | hdr->Flags |= SMB2_FLAGS_SIGNED; | 146 | !encryption_required(tcon)) |
| 147 | shdr->Flags |= SMB2_FLAGS_SIGNED; | ||
| 150 | out: | 148 | out: |
| 151 | pdu->StructureSize2 = cpu_to_le16(parmsize); | ||
| 152 | return; | 149 | return; |
| 153 | } | 150 | } |
| 154 | 151 | ||
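The credit-request logic in smb2_hdr_assemble() is unchanged apart from operating on the sync header: each request asks for up to two additional credits but never past server->max_credits. As a small illustration (hypothetical helper, not in the source):

    /* Hypothetical helper mirroring the CreditRequest computation above. */
    static unsigned short credits_to_request(int credits, int max_credits)
    {
            int spare = max_credits - credits;

            if (spare <= 0)
                    return 0;
            return spare < 2 ? spare : 2;
    }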
| @@ -289,16 +286,74 @@ out: | |||
| 289 | return rc; | 286 | return rc; |
| 290 | } | 287 | } |
| 291 | 288 | ||
| 289 | static void | ||
| 290 | fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, | ||
| 291 | unsigned int *total_len) | ||
| 292 | { | ||
| 293 | struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf; | ||
| 294 | /* lookup word count ie StructureSize from table */ | ||
| 295 | __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; | ||
| 296 | |||
| 297 | /* | ||
| 298 | * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of | ||
| 299 | * largest operations (Create) | ||
| 300 | */ | ||
| 301 | memset(buf, 0, 256); | ||
| 302 | |||
| 303 | smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon); | ||
| 304 | spdu->StructureSize2 = cpu_to_le16(parmsize); | ||
| 305 | |||
| 306 | *total_len = parmsize + sizeof(struct smb2_sync_hdr); | ||
| 307 | } | ||
| 308 | |||
| 309 | /* init request without RFC1001 length at the beginning */ | ||
| 310 | static int | ||
| 311 | smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, | ||
| 312 | void **request_buf, unsigned int *total_len) | ||
| 313 | { | ||
| 314 | int rc; | ||
| 315 | struct smb2_sync_hdr *shdr; | ||
| 316 | |||
| 317 | rc = smb2_reconnect(smb2_command, tcon); | ||
| 318 | if (rc) | ||
| 319 | return rc; | ||
| 320 | |||
| 321 | /* BB eventually switch this to SMB2 specific small buf size */ | ||
| 322 | *request_buf = cifs_small_buf_get(); | ||
| 323 | if (*request_buf == NULL) { | ||
| 324 | /* BB should we add a retry in here if not a writepage? */ | ||
| 325 | return -ENOMEM; | ||
| 326 | } | ||
| 327 | |||
| 328 | shdr = (struct smb2_sync_hdr *)(*request_buf); | ||
| 329 | |||
| 330 | fill_small_buf(smb2_command, tcon, shdr, total_len); | ||
| 331 | |||
| 332 | if (tcon != NULL) { | ||
| 333 | #ifdef CONFIG_CIFS_STATS2 | ||
| 334 | uint16_t com_code = le16_to_cpu(smb2_command); | ||
| 335 | |||
| 336 | cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); | ||
| 337 | #endif | ||
| 338 | cifs_stats_inc(&tcon->num_smbs_sent); | ||
| 339 | } | ||
| 340 | |||
| 341 | return rc; | ||
| 342 | } | ||
| 343 | |||
| 292 | /* | 344 | /* |
| 293 | * Allocate and return pointer to an SMB request hdr, and set basic | 345 | * Allocate and return pointer to an SMB request hdr, and set basic |
| 294 | * SMB information in the SMB header. If the return code is zero, this | 346 | * SMB information in the SMB header. If the return code is zero, this |
| 295 | * function must have filled in request_buf pointer. | 347 | * function must have filled in request_buf pointer. The returned buffer |
| 348 | * has RFC1001 length at the beginning. | ||
| 296 | */ | 349 | */ |
| 297 | static int | 350 | static int |
| 298 | small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, | 351 | small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, |
| 299 | void **request_buf) | 352 | void **request_buf) |
| 300 | { | 353 | { |
| 301 | int rc = 0; | 354 | int rc; |
| 355 | unsigned int total_len; | ||
| 356 | struct smb2_pdu *pdu; | ||
| 302 | 357 | ||
| 303 | rc = smb2_reconnect(smb2_command, tcon); | 358 | rc = smb2_reconnect(smb2_command, tcon); |
| 304 | if (rc) | 359 | if (rc) |
| @@ -311,7 +366,12 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, | |||
| 311 | return -ENOMEM; | 366 | return -ENOMEM; |
| 312 | } | 367 | } |
| 313 | 368 | ||
| 314 | smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon); | 369 | pdu = (struct smb2_pdu *)(*request_buf); |
| 370 | |||
| 371 | fill_small_buf(smb2_command, tcon, get_sync_hdr(pdu), &total_len); | ||
| 372 | |||
| 373 | /* Note this is only network field converted to big endian */ | ||
| 374 | pdu->hdr.smb2_buf_length = cpu_to_be32(total_len); | ||
| 315 | 375 | ||
| 316 | if (tcon != NULL) { | 376 | if (tcon != NULL) { |
| 317 | #ifdef CONFIG_CIFS_STATS2 | 377 | #ifdef CONFIG_CIFS_STATS2 |
| @@ -376,7 +436,6 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req) | |||
| 376 | } | 436 | } |
| 377 | #endif /* SMB311 */ | 437 | #endif /* SMB311 */ |
| 378 | 438 | ||
| 379 | |||
| 380 | /* | 439 | /* |
| 381 | * | 440 | * |
| 382 | * SMB2 Worker functions follow: | 441 | * SMB2 Worker functions follow: |
| @@ -398,6 +457,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
| 398 | struct smb2_negotiate_req *req; | 457 | struct smb2_negotiate_req *req; |
| 399 | struct smb2_negotiate_rsp *rsp; | 458 | struct smb2_negotiate_rsp *rsp; |
| 400 | struct kvec iov[1]; | 459 | struct kvec iov[1]; |
| 460 | struct kvec rsp_iov; | ||
| 401 | int rc = 0; | 461 | int rc = 0; |
| 402 | int resp_buftype; | 462 | int resp_buftype; |
| 403 | struct TCP_Server_Info *server = ses->server; | 463 | struct TCP_Server_Info *server = ses->server; |
| @@ -416,7 +476,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
| 416 | if (rc) | 476 | if (rc) |
| 417 | return rc; | 477 | return rc; |
| 418 | 478 | ||
| 419 | req->hdr.SessionId = 0; | 479 | req->hdr.sync_hdr.SessionId = 0; |
| 420 | 480 | ||
| 421 | req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); | 481 | req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); |
| 422 | 482 | ||
| @@ -446,9 +506,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
| 446 | /* 4 for rfc1002 length field */ | 506 | /* 4 for rfc1002 length field */ |
| 447 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 507 | iov[0].iov_len = get_rfc1002_length(req) + 4; |
| 448 | 508 | ||
| 449 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags); | 509 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
| 450 | 510 | cifs_small_buf_release(req); | |
| 451 | rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base; | 511 | rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; |
| 452 | /* | 512 | /* |
| 453 | * No tcon so can't do | 513 | * No tcon so can't do |
| 454 | * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); | 514 | * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); |
| @@ -627,14 +687,15 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) | |||
| 627 | if (rc) | 687 | if (rc) |
| 628 | return rc; | 688 | return rc; |
| 629 | 689 | ||
| 630 | req->hdr.SessionId = 0; /* First session, not a reauthenticate */ | 690 | /* First session, not a reauthenticate */ |
| 691 | req->hdr.sync_hdr.SessionId = 0; | ||
| 631 | 692 | ||
| 632 | /* if reconnect, we need to send previous sess id, otherwise it is 0 */ | 693 | /* if reconnect, we need to send previous sess id, otherwise it is 0 */ |
| 633 | req->PreviousSessionId = sess_data->previous_session; | 694 | req->PreviousSessionId = sess_data->previous_session; |
| 634 | 695 | ||
| 635 | req->Flags = 0; /* MBZ */ | 696 | req->Flags = 0; /* MBZ */ |
| 636 | /* to enable echos and oplocks */ | 697 | /* to enable echos and oplocks */ |
| 637 | req->hdr.CreditRequest = cpu_to_le16(3); | 698 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(3); |
| 638 | 699 | ||
| 639 | /* only one of SMB2 signing flags may be set in SMB2 request */ | 700 | /* only one of SMB2 signing flags may be set in SMB2 request */ |
| 640 | if (server->sign) | 701 | if (server->sign) |
| @@ -671,6 +732,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) | |||
| 671 | { | 732 | { |
| 672 | int rc; | 733 | int rc; |
| 673 | struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; | 734 | struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base; |
| 735 | struct kvec rsp_iov = { NULL, 0 }; | ||
| 674 | 736 | ||
| 675 | /* Testing shows that buffer offset must be at location of Buffer[0] */ | 737 | /* Testing shows that buffer offset must be at location of Buffer[0] */ |
| 676 | req->SecurityBufferOffset = | 738 | req->SecurityBufferOffset = |
| @@ -685,7 +747,9 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) | |||
| 685 | rc = SendReceive2(sess_data->xid, sess_data->ses, | 747 | rc = SendReceive2(sess_data->xid, sess_data->ses, |
| 686 | sess_data->iov, 2, | 748 | sess_data->iov, 2, |
| 687 | &sess_data->buf0_type, | 749 | &sess_data->buf0_type, |
| 688 | CIFS_LOG_ERROR | CIFS_NEG_OP); | 750 | CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov); |
| 751 | cifs_small_buf_release(sess_data->iov[0].iov_base); | ||
| 752 | memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); | ||
| 689 | 753 | ||
| 690 | return rc; | 754 | return rc; |
| 691 | } | 755 | } |
| @@ -697,15 +761,13 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) | |||
| 697 | struct cifs_ses *ses = sess_data->ses; | 761 | struct cifs_ses *ses = sess_data->ses; |
| 698 | 762 | ||
| 699 | mutex_lock(&ses->server->srv_mutex); | 763 | mutex_lock(&ses->server->srv_mutex); |
| 700 | if (ses->server->sign && ses->server->ops->generate_signingkey) { | 764 | if (ses->server->ops->generate_signingkey) { |
| 701 | rc = ses->server->ops->generate_signingkey(ses); | 765 | rc = ses->server->ops->generate_signingkey(ses); |
| 702 | kfree(ses->auth_key.response); | ||
| 703 | ses->auth_key.response = NULL; | ||
| 704 | if (rc) { | 766 | if (rc) { |
| 705 | cifs_dbg(FYI, | 767 | cifs_dbg(FYI, |
| 706 | "SMB3 session key generation failed\n"); | 768 | "SMB3 session key generation failed\n"); |
| 707 | mutex_unlock(&ses->server->srv_mutex); | 769 | mutex_unlock(&ses->server->srv_mutex); |
| 708 | goto keygen_exit; | 770 | return rc; |
| 709 | } | 771 | } |
| 710 | } | 772 | } |
| 711 | if (!ses->server->session_estab) { | 773 | if (!ses->server->session_estab) { |
| @@ -719,12 +781,6 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) | |||
| 719 | ses->status = CifsGood; | 781 | ses->status = CifsGood; |
| 720 | ses->need_reconnect = false; | 782 | ses->need_reconnect = false; |
| 721 | spin_unlock(&GlobalMid_Lock); | 783 | spin_unlock(&GlobalMid_Lock); |
| 722 | |||
| 723 | keygen_exit: | ||
| 724 | if (!ses->server->sign) { | ||
| 725 | kfree(ses->auth_key.response); | ||
| 726 | ses->auth_key.response = NULL; | ||
| 727 | } | ||
| 728 | return rc; | 784 | return rc; |
| 729 | } | 785 | } |
| 730 | 786 | ||
| @@ -781,11 +837,9 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) | |||
| 781 | goto out_put_spnego_key; | 837 | goto out_put_spnego_key; |
| 782 | 838 | ||
| 783 | rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; | 839 | rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; |
| 784 | ses->Suid = rsp->hdr.SessionId; | 840 | ses->Suid = rsp->hdr.sync_hdr.SessionId; |
| 785 | 841 | ||
| 786 | ses->session_flags = le16_to_cpu(rsp->SessionFlags); | 842 | ses->session_flags = le16_to_cpu(rsp->SessionFlags); |
| 787 | if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) | ||
| 788 | cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); | ||
| 789 | 843 | ||
| 790 | rc = SMB2_sess_establish_session(sess_data); | 844 | rc = SMB2_sess_establish_session(sess_data); |
| 791 | out_put_spnego_key: | 845 | out_put_spnego_key: |
| @@ -859,7 +913,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) | |||
| 859 | 913 | ||
| 860 | /* If true, rc here is expected and not an error */ | 914 | /* If true, rc here is expected and not an error */ |
| 861 | if (sess_data->buf0_type != CIFS_NO_BUFFER && | 915 | if (sess_data->buf0_type != CIFS_NO_BUFFER && |
| 862 | rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) | 916 | rsp->hdr.sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) |
| 863 | rc = 0; | 917 | rc = 0; |
| 864 | 918 | ||
| 865 | if (rc) | 919 | if (rc) |
| @@ -880,10 +934,8 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) | |||
| 880 | cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); | 934 | cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); |
| 881 | 935 | ||
| 882 | 936 | ||
| 883 | ses->Suid = rsp->hdr.SessionId; | 937 | ses->Suid = rsp->hdr.sync_hdr.SessionId; |
| 884 | ses->session_flags = le16_to_cpu(rsp->SessionFlags); | 938 | ses->session_flags = le16_to_cpu(rsp->SessionFlags); |
| 885 | if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) | ||
| 886 | cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); | ||
| 887 | 939 | ||
| 888 | out: | 940 | out: |
| 889 | kfree(ntlmssp_blob); | 941 | kfree(ntlmssp_blob); |
| @@ -916,7 +968,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) | |||
| 916 | goto out; | 968 | goto out; |
| 917 | 969 | ||
| 918 | req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; | 970 | req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; |
| 919 | req->hdr.SessionId = ses->Suid; | 971 | req->hdr.sync_hdr.SessionId = ses->Suid; |
| 920 | 972 | ||
| 921 | rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, | 973 | rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, |
| 922 | sess_data->nls_cp); | 974 | sess_data->nls_cp); |
| @@ -940,10 +992,8 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) | |||
| 940 | 992 | ||
| 941 | rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; | 993 | rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; |
| 942 | 994 | ||
| 943 | ses->Suid = rsp->hdr.SessionId; | 995 | ses->Suid = rsp->hdr.sync_hdr.SessionId; |
| 944 | ses->session_flags = le16_to_cpu(rsp->SessionFlags); | 996 | ses->session_flags = le16_to_cpu(rsp->SessionFlags); |
| 945 | if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) | ||
| 946 | cifs_dbg(VFS, "SMB3 encryption not supported yet\n"); | ||
| 947 | 997 | ||
| 948 | rc = SMB2_sess_establish_session(sess_data); | 998 | rc = SMB2_sess_establish_session(sess_data); |
| 949 | out: | 999 | out: |
| @@ -1018,6 +1068,7 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) | |||
| 1018 | struct smb2_logoff_req *req; /* response is also trivial struct */ | 1068 | struct smb2_logoff_req *req; /* response is also trivial struct */ |
| 1019 | int rc = 0; | 1069 | int rc = 0; |
| 1020 | struct TCP_Server_Info *server; | 1070 | struct TCP_Server_Info *server; |
| 1071 | int flags = 0; | ||
| 1021 | 1072 | ||
| 1022 | cifs_dbg(FYI, "disconnect session %p\n", ses); | 1073 | cifs_dbg(FYI, "disconnect session %p\n", ses); |
| 1023 | 1074 | ||
| @@ -1035,11 +1086,15 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) | |||
| 1035 | return rc; | 1086 | return rc; |
| 1036 | 1087 | ||
| 1037 | /* since no tcon, smb2_init can not do this, so do here */ | 1088 | /* since no tcon, smb2_init can not do this, so do here */ |
| 1038 | req->hdr.SessionId = ses->Suid; | 1089 | req->hdr.sync_hdr.SessionId = ses->Suid; |
| 1039 | if (server->sign) | 1090 | |
| 1040 | req->hdr.Flags |= SMB2_FLAGS_SIGNED; | 1091 | if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) |
| 1092 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1093 | else if (server->sign) | ||
| 1094 | req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; | ||
| 1041 | 1095 | ||
| 1042 | rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0); | 1096 | rc = SendReceiveNoRsp(xid, ses, (char *) req, flags); |
| 1097 | cifs_small_buf_release(req); | ||
| 1043 | /* | 1098 | /* |
| 1044 | * No tcon so can't do | 1099 | * No tcon so can't do |
| 1045 | * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); | 1100 | * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); |
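
The SMB2_logoff hunk above also shows the new per-request flag policy: a session that negotiated SMB3 encryption marks the request for transformation (CIFS_TRANSFORM_REQ) instead of setting the signed flag in the clear-text header, and signing is only requested otherwise. A hedged sketch of that decision, using placeholder flag values and folding both bits into one int purely for brevity (the real code keeps the header flag separate from the send flags):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag values, not the kernel's definitions. */
#define XFRM_REQ   0x01   /* stands in for CIFS_TRANSFORM_REQ */
#define HDR_SIGNED 0x02   /* stands in for SMB2_FLAGS_SIGNED  */

/* Pick per-request flags the way the new logoff path does: encryption wins,
 * signing is only requested when the session is not encrypted. */
static int pick_flags(bool session_encrypted, bool server_signs)
{
	int flags = 0;

	if (session_encrypted)
		flags |= XFRM_REQ;
	else if (server_signs)
		flags |= HDR_SIGNED;
	return flags;
}

int main(void)
{
	printf("encrypted: 0x%x\n", pick_flags(true, true));   /* 0x1 */
	printf("signed:    0x%x\n", pick_flags(false, true));  /* 0x2 */
	printf("neither:   0x%x\n", pick_flags(false, false)); /* 0x0 */
	return 0;
}
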
| @@ -1071,11 +1126,13 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
| 1071 | struct smb2_tree_connect_req *req; | 1126 | struct smb2_tree_connect_req *req; |
| 1072 | struct smb2_tree_connect_rsp *rsp = NULL; | 1127 | struct smb2_tree_connect_rsp *rsp = NULL; |
| 1073 | struct kvec iov[2]; | 1128 | struct kvec iov[2]; |
| 1129 | struct kvec rsp_iov; | ||
| 1074 | int rc = 0; | 1130 | int rc = 0; |
| 1075 | int resp_buftype; | 1131 | int resp_buftype; |
| 1076 | int unc_path_len; | 1132 | int unc_path_len; |
| 1077 | struct TCP_Server_Info *server; | 1133 | struct TCP_Server_Info *server; |
| 1078 | __le16 *unc_path = NULL; | 1134 | __le16 *unc_path = NULL; |
| 1135 | int flags = 0; | ||
| 1079 | 1136 | ||
| 1080 | cifs_dbg(FYI, "TCON\n"); | 1137 | cifs_dbg(FYI, "TCON\n"); |
| 1081 | 1138 | ||
| @@ -1087,12 +1144,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
| 1087 | if (tcon && tcon->bad_network_name) | 1144 | if (tcon && tcon->bad_network_name) |
| 1088 | return -ENOENT; | 1145 | return -ENOENT; |
| 1089 | 1146 | ||
| 1090 | if ((tcon && tcon->seal) && | ||
| 1091 | ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { | ||
| 1092 | cifs_dbg(VFS, "encryption requested but no server support"); | ||
| 1093 | return -EOPNOTSUPP; | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); | 1147 | unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); |
| 1097 | if (unc_path == NULL) | 1148 | if (unc_path == NULL) |
| 1098 | return -ENOMEM; | 1149 | return -ENOMEM; |
| @@ -1111,11 +1162,15 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
| 1111 | } | 1162 | } |
| 1112 | 1163 | ||
| 1113 | if (tcon == NULL) { | 1164 | if (tcon == NULL) { |
| 1165 | if ((ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)) | ||
| 1166 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1167 | |||
| 1114 | /* since no tcon, smb2_init can not do this, so do here */ | 1168 | /* since no tcon, smb2_init can not do this, so do here */ |
| 1115 | req->hdr.SessionId = ses->Suid; | 1169 | req->hdr.sync_hdr.SessionId = ses->Suid; |
| 1116 | /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) | 1170 | /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) |
| 1117 | req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ | 1171 | req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ |
| 1118 | } | 1172 | } else if (encryption_required(tcon)) |
| 1173 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1119 | 1174 | ||
| 1120 | iov[0].iov_base = (char *)req; | 1175 | iov[0].iov_base = (char *)req; |
| 1121 | /* 4 for rfc1002 length field and 1 for pad */ | 1176 | /* 4 for rfc1002 length field and 1 for pad */ |
| @@ -1130,8 +1185,9 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
| 1130 | 1185 | ||
| 1131 | inc_rfc1001_len(req, unc_path_len - 1 /* pad */); | 1186 | inc_rfc1001_len(req, unc_path_len - 1 /* pad */); |
| 1132 | 1187 | ||
| 1133 | rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0); | 1188 | rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); |
| 1134 | rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base; | 1189 | cifs_small_buf_release(req); |
| 1190 | rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; | ||
| 1135 | 1191 | ||
| 1136 | if (rc != 0) { | 1192 | if (rc != 0) { |
| 1137 | if (tcon) { | 1193 | if (tcon) { |
| @@ -1142,7 +1198,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
| 1142 | } | 1198 | } |
| 1143 | 1199 | ||
| 1144 | if (tcon == NULL) { | 1200 | if (tcon == NULL) { |
| 1145 | ses->ipc_tid = rsp->hdr.TreeId; | 1201 | ses->ipc_tid = rsp->hdr.sync_hdr.TreeId; |
| 1146 | goto tcon_exit; | 1202 | goto tcon_exit; |
| 1147 | } | 1203 | } |
| 1148 | 1204 | ||
| @@ -1165,15 +1221,18 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
| 1165 | tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); | 1221 | tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); |
| 1166 | tcon->tidStatus = CifsGood; | 1222 | tcon->tidStatus = CifsGood; |
| 1167 | tcon->need_reconnect = false; | 1223 | tcon->need_reconnect = false; |
| 1168 | tcon->tid = rsp->hdr.TreeId; | 1224 | tcon->tid = rsp->hdr.sync_hdr.TreeId; |
| 1169 | strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); | 1225 | strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); |
| 1170 | 1226 | ||
| 1171 | if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && | 1227 | if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) && |
| 1172 | ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) | 1228 | ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0)) |
| 1173 | cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); | 1229 | cifs_dbg(VFS, "DFS capability contradicts DFS flag\n"); |
| 1230 | |||
| 1231 | if (tcon->seal && | ||
| 1232 | !(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) | ||
| 1233 | cifs_dbg(VFS, "Encryption is requested but not supported\n"); | ||
| 1234 | |||
| 1174 | init_copy_chunk_defaults(tcon); | 1235 | init_copy_chunk_defaults(tcon); |
| 1175 | if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA) | ||
| 1176 | cifs_dbg(VFS, "Encrypted shares not supported"); | ||
| 1177 | if (tcon->ses->server->ops->validate_negotiate) | 1236 | if (tcon->ses->server->ops->validate_negotiate) |
| 1178 | rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); | 1237 | rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); |
| 1179 | tcon_exit: | 1238 | tcon_exit: |
| @@ -1182,7 +1241,7 @@ tcon_exit: | |||
| 1182 | return rc; | 1241 | return rc; |
| 1183 | 1242 | ||
| 1184 | tcon_error_exit: | 1243 | tcon_error_exit: |
| 1185 | if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) { | 1244 | if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { |
| 1186 | cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); | 1245 | cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); |
| 1187 | if (tcon) | 1246 | if (tcon) |
| 1188 | tcon->bad_network_name = true; | 1247 | tcon->bad_network_name = true; |
| @@ -1197,6 +1256,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 1197 | int rc = 0; | 1256 | int rc = 0; |
| 1198 | struct TCP_Server_Info *server; | 1257 | struct TCP_Server_Info *server; |
| 1199 | struct cifs_ses *ses = tcon->ses; | 1258 | struct cifs_ses *ses = tcon->ses; |
| 1259 | int flags = 0; | ||
| 1200 | 1260 | ||
| 1201 | cifs_dbg(FYI, "Tree Disconnect\n"); | 1261 | cifs_dbg(FYI, "Tree Disconnect\n"); |
| 1202 | 1262 | ||
| @@ -1212,7 +1272,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) | |||
| 1212 | if (rc) | 1272 | if (rc) |
| 1213 | return rc; | 1273 | return rc; |
| 1214 | 1274 | ||
| 1215 | rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0); | 1275 | if (encryption_required(tcon)) |
| 1276 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1277 | |||
| 1278 | rc = SendReceiveNoRsp(xid, ses, (char *)req, flags); | ||
| 1279 | cifs_small_buf_release(req); | ||
| 1216 | if (rc) | 1280 | if (rc) |
| 1217 | cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); | 1281 | cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); |
| 1218 | 1282 | ||
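
Another pattern repeated through the hunks in this file: SendReceive2()/SendReceiveNoRsp() callers now release the small request buffer themselves right after the send (cifs_small_buf_release), and the response arrives in a separate rsp_iov rather than overwriting iov[0]. A small user-space sketch of that ownership split, with an invented send_receive() helper standing in for the real transport call:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct kvec { void *iov_base; size_t iov_len; };

/* Hypothetical transport call: consumes the request data and hands back a
 * freshly allocated response in *rsp_iov. */
static int send_receive(const struct kvec *req, struct kvec *rsp_iov)
{
	static const char reply[] = "response-bytes";

	(void)req;
	rsp_iov->iov_len = sizeof(reply);
	rsp_iov->iov_base = malloc(rsp_iov->iov_len);
	if (!rsp_iov->iov_base)
		return -1;
	memcpy(rsp_iov->iov_base, reply, rsp_iov->iov_len);
	return 0;
}

int main(void)
{
	struct kvec req = { .iov_base = strdup("request-bytes"), .iov_len = 14 };
	struct kvec rsp_iov = { 0 };
	int rc = send_receive(&req, &rsp_iov);

	/* Caller owns the request buffer: release it right after the send,
	 * whether or not the call succeeded (mirrors cifs_small_buf_release). */
	free(req.iov_base);

	if (rc == 0) {
		printf("got %zu response bytes\n", rsp_iov.iov_len);
		free(rsp_iov.iov_base);   /* mirrors free_rsp_buf() */
	}
	return rc;
}
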
| @@ -1474,14 +1538,16 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
| 1474 | struct cifs_tcon *tcon = oparms->tcon; | 1538 | struct cifs_tcon *tcon = oparms->tcon; |
| 1475 | struct cifs_ses *ses = tcon->ses; | 1539 | struct cifs_ses *ses = tcon->ses; |
| 1476 | struct kvec iov[4]; | 1540 | struct kvec iov[4]; |
| 1541 | struct kvec rsp_iov; | ||
| 1477 | int resp_buftype; | 1542 | int resp_buftype; |
| 1478 | int uni_path_len; | 1543 | int uni_path_len; |
| 1479 | __le16 *copy_path = NULL; | 1544 | __le16 *copy_path = NULL; |
| 1480 | int copy_size; | 1545 | int copy_size; |
| 1481 | int rc = 0; | 1546 | int rc = 0; |
| 1482 | unsigned int num_iovecs = 2; | 1547 | unsigned int n_iov = 2; |
| 1483 | __u32 file_attributes = 0; | 1548 | __u32 file_attributes = 0; |
| 1484 | char *dhc_buf = NULL, *lc_buf = NULL; | 1549 | char *dhc_buf = NULL, *lc_buf = NULL; |
| 1550 | int flags = 0; | ||
| 1485 | 1551 | ||
| 1486 | cifs_dbg(FYI, "create/open\n"); | 1552 | cifs_dbg(FYI, "create/open\n"); |
| 1487 | 1553 | ||
| @@ -1494,6 +1560,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
| 1494 | if (rc) | 1560 | if (rc) |
| 1495 | return rc; | 1561 | return rc; |
| 1496 | 1562 | ||
| 1563 | if (encryption_required(tcon)) | ||
| 1564 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1565 | |||
| 1497 | if (oparms->create_options & CREATE_OPTION_READONLY) | 1566 | if (oparms->create_options & CREATE_OPTION_READONLY) |
| 1498 | file_attributes |= ATTR_READONLY; | 1567 | file_attributes |= ATTR_READONLY; |
| 1499 | if (oparms->create_options & CREATE_OPTION_SPECIAL) | 1568 | if (oparms->create_options & CREATE_OPTION_SPECIAL) |
| @@ -1544,25 +1613,25 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
| 1544 | *oplock == SMB2_OPLOCK_LEVEL_NONE) | 1613 | *oplock == SMB2_OPLOCK_LEVEL_NONE) |
| 1545 | req->RequestedOplockLevel = *oplock; | 1614 | req->RequestedOplockLevel = *oplock; |
| 1546 | else { | 1615 | else { |
| 1547 | rc = add_lease_context(server, iov, &num_iovecs, oplock); | 1616 | rc = add_lease_context(server, iov, &n_iov, oplock); |
| 1548 | if (rc) { | 1617 | if (rc) { |
| 1549 | cifs_small_buf_release(req); | 1618 | cifs_small_buf_release(req); |
| 1550 | kfree(copy_path); | 1619 | kfree(copy_path); |
| 1551 | return rc; | 1620 | return rc; |
| 1552 | } | 1621 | } |
| 1553 | lc_buf = iov[num_iovecs-1].iov_base; | 1622 | lc_buf = iov[n_iov-1].iov_base; |
| 1554 | } | 1623 | } |
| 1555 | 1624 | ||
| 1556 | if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { | 1625 | if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) { |
| 1557 | /* need to set Next field of lease context if we request it */ | 1626 | /* need to set Next field of lease context if we request it */ |
| 1558 | if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) { | 1627 | if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) { |
| 1559 | struct create_context *ccontext = | 1628 | struct create_context *ccontext = |
| 1560 | (struct create_context *)iov[num_iovecs-1].iov_base; | 1629 | (struct create_context *)iov[n_iov-1].iov_base; |
| 1561 | ccontext->Next = | 1630 | ccontext->Next = |
| 1562 | cpu_to_le32(server->vals->create_lease_size); | 1631 | cpu_to_le32(server->vals->create_lease_size); |
| 1563 | } | 1632 | } |
| 1564 | 1633 | ||
| 1565 | rc = add_durable_context(iov, &num_iovecs, oparms, | 1634 | rc = add_durable_context(iov, &n_iov, oparms, |
| 1566 | tcon->use_persistent); | 1635 | tcon->use_persistent); |
| 1567 | if (rc) { | 1636 | if (rc) { |
| 1568 | cifs_small_buf_release(req); | 1637 | cifs_small_buf_release(req); |
| @@ -1570,11 +1639,12 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
| 1570 | kfree(lc_buf); | 1639 | kfree(lc_buf); |
| 1571 | return rc; | 1640 | return rc; |
| 1572 | } | 1641 | } |
| 1573 | dhc_buf = iov[num_iovecs-1].iov_base; | 1642 | dhc_buf = iov[n_iov-1].iov_base; |
| 1574 | } | 1643 | } |
| 1575 | 1644 | ||
| 1576 | rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); | 1645 | rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); |
| 1577 | rsp = (struct smb2_create_rsp *)iov[0].iov_base; | 1646 | cifs_small_buf_release(req); |
| 1647 | rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; | ||
| 1578 | 1648 | ||
| 1579 | if (rc != 0) { | 1649 | if (rc != 0) { |
| 1580 | cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); | 1650 | cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); |
| @@ -1618,12 +1688,15 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1618 | { | 1688 | { |
| 1619 | struct smb2_ioctl_req *req; | 1689 | struct smb2_ioctl_req *req; |
| 1620 | struct smb2_ioctl_rsp *rsp; | 1690 | struct smb2_ioctl_rsp *rsp; |
| 1691 | struct smb2_sync_hdr *shdr; | ||
| 1621 | struct TCP_Server_Info *server; | 1692 | struct TCP_Server_Info *server; |
| 1622 | struct cifs_ses *ses; | 1693 | struct cifs_ses *ses; |
| 1623 | struct kvec iov[2]; | 1694 | struct kvec iov[2]; |
| 1695 | struct kvec rsp_iov; | ||
| 1624 | int resp_buftype; | 1696 | int resp_buftype; |
| 1625 | int num_iovecs; | 1697 | int n_iov; |
| 1626 | int rc = 0; | 1698 | int rc = 0; |
| 1699 | int flags = 0; | ||
| 1627 | 1700 | ||
| 1628 | cifs_dbg(FYI, "SMB2 IOCTL\n"); | 1701 | cifs_dbg(FYI, "SMB2 IOCTL\n"); |
| 1629 | 1702 | ||
| @@ -1648,6 +1721,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1648 | if (rc) | 1721 | if (rc) |
| 1649 | return rc; | 1722 | return rc; |
| 1650 | 1723 | ||
| 1724 | if (encryption_required(tcon)) | ||
| 1725 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1726 | |||
| 1651 | req->CtlCode = cpu_to_le32(opcode); | 1727 | req->CtlCode = cpu_to_le32(opcode); |
| 1652 | req->PersistentFileId = persistent_fid; | 1728 | req->PersistentFileId = persistent_fid; |
| 1653 | req->VolatileFileId = volatile_fid; | 1729 | req->VolatileFileId = volatile_fid; |
| @@ -1659,9 +1735,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1659 | cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4); | 1735 | cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4); |
| 1660 | iov[1].iov_base = in_data; | 1736 | iov[1].iov_base = in_data; |
| 1661 | iov[1].iov_len = indatalen; | 1737 | iov[1].iov_len = indatalen; |
| 1662 | num_iovecs = 2; | 1738 | n_iov = 2; |
| 1663 | } else | 1739 | } else |
| 1664 | num_iovecs = 1; | 1740 | n_iov = 1; |
| 1665 | 1741 | ||
| 1666 | req->OutputOffset = 0; | 1742 | req->OutputOffset = 0; |
| 1667 | req->OutputCount = 0; /* MBZ */ | 1743 | req->OutputCount = 0; /* MBZ */ |
| @@ -1698,8 +1774,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1698 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 1774 | iov[0].iov_len = get_rfc1002_length(req) + 4; |
| 1699 | 1775 | ||
| 1700 | 1776 | ||
| 1701 | rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); | 1777 | rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); |
| 1702 | rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; | 1778 | cifs_small_buf_release(req); |
| 1779 | rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; | ||
| 1703 | 1780 | ||
| 1704 | if ((rc != 0) && (rc != -EINVAL)) { | 1781 | if ((rc != 0) && (rc != -EINVAL)) { |
| 1705 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); | 1782 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); |
| @@ -1742,9 +1819,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 1742 | goto ioctl_exit; | 1819 | goto ioctl_exit; |
| 1743 | } | 1820 | } |
| 1744 | 1821 | ||
| 1745 | memcpy(*out_data, | 1822 | shdr = get_sync_hdr(rsp); |
| 1746 | (char *)&rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset), | 1823 | memcpy(*out_data, (char *)shdr + le32_to_cpu(rsp->OutputOffset), *plen); |
| 1747 | *plen); | ||
| 1748 | ioctl_exit: | 1824 | ioctl_exit: |
| 1749 | free_rsp_buf(resp_buftype, rsp); | 1825 | free_rsp_buf(resp_buftype, rsp); |
| 1750 | return rc; | 1826 | return rc; |
| @@ -1784,8 +1860,10 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1784 | struct TCP_Server_Info *server; | 1860 | struct TCP_Server_Info *server; |
| 1785 | struct cifs_ses *ses = tcon->ses; | 1861 | struct cifs_ses *ses = tcon->ses; |
| 1786 | struct kvec iov[1]; | 1862 | struct kvec iov[1]; |
| 1863 | struct kvec rsp_iov; | ||
| 1787 | int resp_buftype; | 1864 | int resp_buftype; |
| 1788 | int rc = 0; | 1865 | int rc = 0; |
| 1866 | int flags = 0; | ||
| 1789 | 1867 | ||
| 1790 | cifs_dbg(FYI, "Close\n"); | 1868 | cifs_dbg(FYI, "Close\n"); |
| 1791 | 1869 | ||
| @@ -1798,6 +1876,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1798 | if (rc) | 1876 | if (rc) |
| 1799 | return rc; | 1877 | return rc; |
| 1800 | 1878 | ||
| 1879 | if (encryption_required(tcon)) | ||
| 1880 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1881 | |||
| 1801 | req->PersistentFileId = persistent_fid; | 1882 | req->PersistentFileId = persistent_fid; |
| 1802 | req->VolatileFileId = volatile_fid; | 1883 | req->VolatileFileId = volatile_fid; |
| 1803 | 1884 | ||
| @@ -1805,8 +1886,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1805 | /* 4 for rfc1002 length field */ | 1886 | /* 4 for rfc1002 length field */ |
| 1806 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 1887 | iov[0].iov_len = get_rfc1002_length(req) + 4; |
| 1807 | 1888 | ||
| 1808 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); | 1889 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
| 1809 | rsp = (struct smb2_close_rsp *)iov[0].iov_base; | 1890 | cifs_small_buf_release(req); |
| 1891 | rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; | ||
| 1810 | 1892 | ||
| 1811 | if (rc != 0) { | 1893 | if (rc != 0) { |
| 1812 | cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); | 1894 | cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); |
| @@ -1885,10 +1967,12 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1885 | struct smb2_query_info_req *req; | 1967 | struct smb2_query_info_req *req; |
| 1886 | struct smb2_query_info_rsp *rsp = NULL; | 1968 | struct smb2_query_info_rsp *rsp = NULL; |
| 1887 | struct kvec iov[2]; | 1969 | struct kvec iov[2]; |
| 1970 | struct kvec rsp_iov; | ||
| 1888 | int rc = 0; | 1971 | int rc = 0; |
| 1889 | int resp_buftype; | 1972 | int resp_buftype; |
| 1890 | struct TCP_Server_Info *server; | 1973 | struct TCP_Server_Info *server; |
| 1891 | struct cifs_ses *ses = tcon->ses; | 1974 | struct cifs_ses *ses = tcon->ses; |
| 1975 | int flags = 0; | ||
| 1892 | 1976 | ||
| 1893 | cifs_dbg(FYI, "Query Info\n"); | 1977 | cifs_dbg(FYI, "Query Info\n"); |
| 1894 | 1978 | ||
| @@ -1901,6 +1985,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1901 | if (rc) | 1985 | if (rc) |
| 1902 | return rc; | 1986 | return rc; |
| 1903 | 1987 | ||
| 1988 | if (encryption_required(tcon)) | ||
| 1989 | flags |= CIFS_TRANSFORM_REQ; | ||
| 1990 | |||
| 1904 | req->InfoType = SMB2_O_INFO_FILE; | 1991 | req->InfoType = SMB2_O_INFO_FILE; |
| 1905 | req->FileInfoClass = info_class; | 1992 | req->FileInfoClass = info_class; |
| 1906 | req->PersistentFileId = persistent_fid; | 1993 | req->PersistentFileId = persistent_fid; |
| @@ -1914,8 +2001,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1914 | /* 4 for rfc1002 length field */ | 2001 | /* 4 for rfc1002 length field */ |
| 1915 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 2002 | iov[0].iov_len = get_rfc1002_length(req) + 4; |
| 1916 | 2003 | ||
| 1917 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); | 2004 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
| 1918 | rsp = (struct smb2_query_info_rsp *)iov[0].iov_base; | 2005 | cifs_small_buf_release(req); |
| 2006 | rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; | ||
| 1919 | 2007 | ||
| 1920 | if (rc) { | 2008 | if (rc) { |
| 1921 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); | 2009 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); |
| @@ -1963,11 +2051,11 @@ static void | |||
| 1963 | smb2_echo_callback(struct mid_q_entry *mid) | 2051 | smb2_echo_callback(struct mid_q_entry *mid) |
| 1964 | { | 2052 | { |
| 1965 | struct TCP_Server_Info *server = mid->callback_data; | 2053 | struct TCP_Server_Info *server = mid->callback_data; |
| 1966 | struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf; | 2054 | struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf; |
| 1967 | unsigned int credits_received = 1; | 2055 | unsigned int credits_received = 1; |
| 1968 | 2056 | ||
| 1969 | if (mid->mid_state == MID_RESPONSE_RECEIVED) | 2057 | if (mid->mid_state == MID_RESPONSE_RECEIVED) |
| 1970 | credits_received = le16_to_cpu(smb2->hdr.CreditRequest); | 2058 | credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); |
| 1971 | 2059 | ||
| 1972 | mutex_lock(&server->srv_mutex); | 2060 | mutex_lock(&server->srv_mutex); |
| 1973 | DeleteMidQEntry(mid); | 2061 | DeleteMidQEntry(mid); |
| @@ -2029,9 +2117,9 @@ SMB2_echo(struct TCP_Server_Info *server) | |||
| 2029 | { | 2117 | { |
| 2030 | struct smb2_echo_req *req; | 2118 | struct smb2_echo_req *req; |
| 2031 | int rc = 0; | 2119 | int rc = 0; |
| 2032 | struct kvec iov; | 2120 | struct kvec iov[2]; |
| 2033 | struct smb_rqst rqst = { .rq_iov = &iov, | 2121 | struct smb_rqst rqst = { .rq_iov = iov, |
| 2034 | .rq_nvec = 1 }; | 2122 | .rq_nvec = 2 }; |
| 2035 | 2123 | ||
| 2036 | cifs_dbg(FYI, "In echo request\n"); | 2124 | cifs_dbg(FYI, "In echo request\n"); |
| 2037 | 2125 | ||
| @@ -2045,14 +2133,16 @@ SMB2_echo(struct TCP_Server_Info *server) | |||
| 2045 | if (rc) | 2133 | if (rc) |
| 2046 | return rc; | 2134 | return rc; |
| 2047 | 2135 | ||
| 2048 | req->hdr.CreditRequest = cpu_to_le16(1); | 2136 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); |
| 2049 | 2137 | ||
| 2050 | iov.iov_base = (char *)req; | ||
| 2051 | /* 4 for rfc1002 length field */ | 2138 | /* 4 for rfc1002 length field */ |
| 2052 | iov.iov_len = get_rfc1002_length(req) + 4; | 2139 | iov[0].iov_len = 4; |
| 2140 | iov[0].iov_base = (char *)req; | ||
| 2141 | iov[1].iov_len = get_rfc1002_length(req); | ||
| 2142 | iov[1].iov_base = (char *)req + 4; | ||
| 2053 | 2143 | ||
| 2054 | rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server, | 2144 | rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, |
| 2055 | CIFS_ECHO_OP); | 2145 | server, CIFS_ECHO_OP); |
| 2056 | if (rc) | 2146 | if (rc) |
| 2057 | cifs_dbg(FYI, "Echo request failed: %d\n", rc); | 2147 | cifs_dbg(FYI, "Echo request failed: %d\n", rc); |
| 2058 | 2148 | ||
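
SMB2_echo above (and the async read/write paths further down) stop describing a message as one kvec spanning "4-byte length + PDU" and instead split it into iov[0] for the RFC1002 length and iov[1] for the PDU proper, which is the shape a signing or encrypting transform wants to see. A standalone sketch of that framing (plain C, not the kernel helpers):

#include <arpa/inet.h>   /* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct kvec { void *iov_base; size_t iov_len; };

/* Build a two-element vector: [0] = 4-byte big-endian length, [1] = payload.
 * Keeping the framing separate lets later code sign/encrypt only iov[1]. */
static void frame_pdu(struct kvec iov[2], void *pdu, uint32_t pdu_len,
		      uint32_t *len_be)
{
	*len_be = htonl(pdu_len);        /* RFC1002-style length, network order */
	iov[0].iov_base = len_be;
	iov[0].iov_len  = sizeof(*len_be);
	iov[1].iov_base = pdu;
	iov[1].iov_len  = pdu_len;
}

int main(void)
{
	char pdu[64];
	uint32_t len_be;
	struct kvec iov[2];

	memset(pdu, 0xab, sizeof(pdu));
	frame_pdu(iov, pdu, sizeof(pdu), &len_be);
	printf("iov[0]=%zu bytes, iov[1]=%zu bytes\n",
	       iov[0].iov_len, iov[1].iov_len);
	return 0;
}
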
| @@ -2068,8 +2158,10 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 2068 | struct TCP_Server_Info *server; | 2158 | struct TCP_Server_Info *server; |
| 2069 | struct cifs_ses *ses = tcon->ses; | 2159 | struct cifs_ses *ses = tcon->ses; |
| 2070 | struct kvec iov[1]; | 2160 | struct kvec iov[1]; |
| 2161 | struct kvec rsp_iov; | ||
| 2071 | int resp_buftype; | 2162 | int resp_buftype; |
| 2072 | int rc = 0; | 2163 | int rc = 0; |
| 2164 | int flags = 0; | ||
| 2073 | 2165 | ||
| 2074 | cifs_dbg(FYI, "Flush\n"); | 2166 | cifs_dbg(FYI, "Flush\n"); |
| 2075 | 2167 | ||
| @@ -2082,6 +2174,9 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 2082 | if (rc) | 2174 | if (rc) |
| 2083 | return rc; | 2175 | return rc; |
| 2084 | 2176 | ||
| 2177 | if (encryption_required(tcon)) | ||
| 2178 | flags |= CIFS_TRANSFORM_REQ; | ||
| 2179 | |||
| 2085 | req->PersistentFileId = persistent_fid; | 2180 | req->PersistentFileId = persistent_fid; |
| 2086 | req->VolatileFileId = volatile_fid; | 2181 | req->VolatileFileId = volatile_fid; |
| 2087 | 2182 | ||
| @@ -2089,12 +2184,13 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 2089 | /* 4 for rfc1002 length field */ | 2184 | /* 4 for rfc1002 length field */ |
| 2090 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 2185 | iov[0].iov_len = get_rfc1002_length(req) + 4; |
| 2091 | 2186 | ||
| 2092 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); | 2187 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
| 2188 | cifs_small_buf_release(req); | ||
| 2093 | 2189 | ||
| 2094 | if (rc != 0) | 2190 | if (rc != 0) |
| 2095 | cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); | 2191 | cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); |
| 2096 | 2192 | ||
| 2097 | free_rsp_buf(resp_buftype, iov[0].iov_base); | 2193 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
| 2098 | return rc; | 2194 | return rc; |
| 2099 | } | 2195 | } |
| 2100 | 2196 | ||
| @@ -2103,19 +2199,23 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
| 2103 | * have the end_of_chain boolean set to true. | 2199 | * have the end_of_chain boolean set to true. |
| 2104 | */ | 2200 | */ |
| 2105 | static int | 2201 | static int |
| 2106 | smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms, | 2202 | smb2_new_read_req(void **buf, unsigned int *total_len, |
| 2107 | unsigned int remaining_bytes, int request_type) | 2203 | struct cifs_io_parms *io_parms, unsigned int remaining_bytes, |
| 2204 | int request_type) | ||
| 2108 | { | 2205 | { |
| 2109 | int rc = -EACCES; | 2206 | int rc = -EACCES; |
| 2110 | struct smb2_read_req *req = NULL; | 2207 | struct smb2_read_plain_req *req = NULL; |
| 2208 | struct smb2_sync_hdr *shdr; | ||
| 2111 | 2209 | ||
| 2112 | rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req); | 2210 | rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req, |
| 2211 | total_len); | ||
| 2113 | if (rc) | 2212 | if (rc) |
| 2114 | return rc; | 2213 | return rc; |
| 2115 | if (io_parms->tcon->ses->server == NULL) | 2214 | if (io_parms->tcon->ses->server == NULL) |
| 2116 | return -ECONNABORTED; | 2215 | return -ECONNABORTED; |
| 2117 | 2216 | ||
| 2118 | req->hdr.ProcessId = cpu_to_le32(io_parms->pid); | 2217 | shdr = &req->sync_hdr; |
| 2218 | shdr->ProcessId = cpu_to_le32(io_parms->pid); | ||
| 2119 | 2219 | ||
| 2120 | req->PersistentFileId = io_parms->persistent_fid; | 2220 | req->PersistentFileId = io_parms->persistent_fid; |
| 2121 | req->VolatileFileId = io_parms->volatile_fid; | 2221 | req->VolatileFileId = io_parms->volatile_fid; |
| @@ -2128,19 +2228,19 @@ smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms, | |||
| 2128 | 2228 | ||
| 2129 | if (request_type & CHAINED_REQUEST) { | 2229 | if (request_type & CHAINED_REQUEST) { |
| 2130 | if (!(request_type & END_OF_CHAIN)) { | 2230 | if (!(request_type & END_OF_CHAIN)) { |
| 2131 | /* 4 for rfc1002 length field */ | 2231 | /* next 8-byte aligned request */ |
| 2132 | req->hdr.NextCommand = | 2232 | *total_len = DIV_ROUND_UP(*total_len, 8) * 8; |
| 2133 | cpu_to_le32(get_rfc1002_length(req) + 4); | 2233 | shdr->NextCommand = cpu_to_le32(*total_len); |
| 2134 | } else /* END_OF_CHAIN */ | 2234 | } else /* END_OF_CHAIN */ |
| 2135 | req->hdr.NextCommand = 0; | 2235 | shdr->NextCommand = 0; |
| 2136 | if (request_type & RELATED_REQUEST) { | 2236 | if (request_type & RELATED_REQUEST) { |
| 2137 | req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS; | 2237 | shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS; |
| 2138 | /* | 2238 | /* |
| 2139 | * Related requests use info from previous read request | 2239 | * Related requests use info from previous read request |
| 2140 | * in chain. | 2240 | * in chain. |
| 2141 | */ | 2241 | */ |
| 2142 | req->hdr.SessionId = 0xFFFFFFFF; | 2242 | shdr->SessionId = 0xFFFFFFFF; |
| 2143 | req->hdr.TreeId = 0xFFFFFFFF; | 2243 | shdr->TreeId = 0xFFFFFFFF; |
| 2144 | req->PersistentFileId = 0xFFFFFFFF; | 2244 | req->PersistentFileId = 0xFFFFFFFF; |
| 2145 | req->VolatileFileId = 0xFFFFFFFF; | 2245 | req->VolatileFileId = 0xFFFFFFFF; |
| 2146 | } | 2246 | } |
| @@ -2150,9 +2250,7 @@ smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms, | |||
| 2150 | else | 2250 | else |
| 2151 | req->RemainingBytes = 0; | 2251 | req->RemainingBytes = 0; |
| 2152 | 2252 | ||
| 2153 | iov[0].iov_base = (char *)req; | 2253 | *buf = req; |
| 2154 | /* 4 for rfc1002 length field */ | ||
| 2155 | iov[0].iov_len = get_rfc1002_length(req) + 4; | ||
| 2156 | return rc; | 2254 | return rc; |
| 2157 | } | 2255 | } |
| 2158 | 2256 | ||
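
In the smb2_new_read_req hunks above, the NextCommand offset for a chained (compounded) read is no longer derived from the RFC1002 length; it is the request length rounded up to the next 8-byte boundary, since compounded SMB2 requests must start 8-byte aligned. The rounding itself is just:

#include <stdio.h>

/* Round len up to the next multiple of 8, equivalent to the
 * DIV_ROUND_UP(*total_len, 8) * 8 expression in the hunk above. */
static unsigned int round_up8(unsigned int len)
{
	return (len + 7) & ~7u;
}

int main(void)
{
	unsigned int lens[] = { 57, 64, 65 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u -> %u\n", lens[i], round_up8(lens[i]));
	/* prints 57 -> 64, 64 -> 64, 65 -> 72 */
	return 0;
}
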
| @@ -2162,10 +2260,11 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
| 2162 | struct cifs_readdata *rdata = mid->callback_data; | 2260 | struct cifs_readdata *rdata = mid->callback_data; |
| 2163 | struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); | 2261 | struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); |
| 2164 | struct TCP_Server_Info *server = tcon->ses->server; | 2262 | struct TCP_Server_Info *server = tcon->ses->server; |
| 2165 | struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base; | 2263 | struct smb2_sync_hdr *shdr = |
| 2264 | (struct smb2_sync_hdr *)rdata->iov[1].iov_base; | ||
| 2166 | unsigned int credits_received = 1; | 2265 | unsigned int credits_received = 1; |
| 2167 | struct smb_rqst rqst = { .rq_iov = &rdata->iov, | 2266 | struct smb_rqst rqst = { .rq_iov = rdata->iov, |
| 2168 | .rq_nvec = 1, | 2267 | .rq_nvec = 2, |
| 2169 | .rq_pages = rdata->pages, | 2268 | .rq_pages = rdata->pages, |
| 2170 | .rq_npages = rdata->nr_pages, | 2269 | .rq_npages = rdata->nr_pages, |
| 2171 | .rq_pagesz = rdata->pagesz, | 2270 | .rq_pagesz = rdata->pagesz, |
| @@ -2177,9 +2276,9 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
| 2177 | 2276 | ||
| 2178 | switch (mid->mid_state) { | 2277 | switch (mid->mid_state) { |
| 2179 | case MID_RESPONSE_RECEIVED: | 2278 | case MID_RESPONSE_RECEIVED: |
| 2180 | credits_received = le16_to_cpu(buf->CreditRequest); | 2279 | credits_received = le16_to_cpu(shdr->CreditRequest); |
| 2181 | /* result already set, check signature */ | 2280 | /* result already set, check signature */ |
| 2182 | if (server->sign) { | 2281 | if (server->sign && !mid->decrypted) { |
| 2183 | int rc; | 2282 | int rc; |
| 2184 | 2283 | ||
| 2185 | rc = smb2_verify_signature(&rqst, server); | 2284 | rc = smb2_verify_signature(&rqst, server); |
| @@ -2216,16 +2315,19 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
| 2216 | add_credits(server, credits_received, 0); | 2315 | add_credits(server, credits_received, 0); |
| 2217 | } | 2316 | } |
| 2218 | 2317 | ||
| 2219 | /* smb2_async_readv - send an async write, and set up mid to handle result */ | 2318 | /* smb2_async_readv - send an async read, and set up mid to handle result */ |
| 2220 | int | 2319 | int |
| 2221 | smb2_async_readv(struct cifs_readdata *rdata) | 2320 | smb2_async_readv(struct cifs_readdata *rdata) |
| 2222 | { | 2321 | { |
| 2223 | int rc, flags = 0; | 2322 | int rc, flags = 0; |
| 2224 | struct smb2_hdr *buf; | 2323 | char *buf; |
| 2324 | struct smb2_sync_hdr *shdr; | ||
| 2225 | struct cifs_io_parms io_parms; | 2325 | struct cifs_io_parms io_parms; |
| 2226 | struct smb_rqst rqst = { .rq_iov = &rdata->iov, | 2326 | struct smb_rqst rqst = { .rq_iov = rdata->iov, |
| 2227 | .rq_nvec = 1 }; | 2327 | .rq_nvec = 2 }; |
| 2228 | struct TCP_Server_Info *server; | 2328 | struct TCP_Server_Info *server; |
| 2329 | unsigned int total_len; | ||
| 2330 | __be32 req_len; | ||
| 2229 | 2331 | ||
| 2230 | cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", | 2332 | cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n", |
| 2231 | __func__, rdata->offset, rdata->bytes); | 2333 | __func__, rdata->offset, rdata->bytes); |
| @@ -2239,7 +2341,7 @@ smb2_async_readv(struct cifs_readdata *rdata) | |||
| 2239 | 2341 | ||
| 2240 | server = io_parms.tcon->ses->server; | 2342 | server = io_parms.tcon->ses->server; |
| 2241 | 2343 | ||
| 2242 | rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0); | 2344 | rc = smb2_new_read_req((void **) &buf, &total_len, &io_parms, 0, 0); |
| 2243 | if (rc) { | 2345 | if (rc) { |
| 2244 | if (rc == -EAGAIN && rdata->credits) { | 2346 | if (rc == -EAGAIN && rdata->credits) { |
| 2245 | /* credits was reset by reconnect */ | 2347 | /* credits was reset by reconnect */ |
| @@ -2252,26 +2354,34 @@ smb2_async_readv(struct cifs_readdata *rdata) | |||
| 2252 | return rc; | 2354 | return rc; |
| 2253 | } | 2355 | } |
| 2254 | 2356 | ||
| 2255 | buf = (struct smb2_hdr *)rdata->iov.iov_base; | 2357 | if (encryption_required(io_parms.tcon)) |
| 2256 | /* 4 for rfc1002 length field */ | 2358 | flags |= CIFS_TRANSFORM_REQ; |
| 2257 | rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4; | 2359 | |
| 2360 | req_len = cpu_to_be32(total_len); | ||
| 2361 | |||
| 2362 | rdata->iov[0].iov_base = &req_len; | ||
| 2363 | rdata->iov[0].iov_len = sizeof(__be32); | ||
| 2364 | rdata->iov[1].iov_base = buf; | ||
| 2365 | rdata->iov[1].iov_len = total_len; | ||
| 2366 | |||
| 2367 | shdr = (struct smb2_sync_hdr *)buf; | ||
| 2258 | 2368 | ||
| 2259 | if (rdata->credits) { | 2369 | if (rdata->credits) { |
| 2260 | buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, | 2370 | shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, |
| 2261 | SMB2_MAX_BUFFER_SIZE)); | 2371 | SMB2_MAX_BUFFER_SIZE)); |
| 2262 | buf->CreditRequest = buf->CreditCharge; | 2372 | shdr->CreditRequest = shdr->CreditCharge; |
| 2263 | spin_lock(&server->req_lock); | 2373 | spin_lock(&server->req_lock); |
| 2264 | server->credits += rdata->credits - | 2374 | server->credits += rdata->credits - |
| 2265 | le16_to_cpu(buf->CreditCharge); | 2375 | le16_to_cpu(shdr->CreditCharge); |
| 2266 | spin_unlock(&server->req_lock); | 2376 | spin_unlock(&server->req_lock); |
| 2267 | wake_up(&server->request_q); | 2377 | wake_up(&server->request_q); |
| 2268 | flags = CIFS_HAS_CREDITS; | 2378 | flags |= CIFS_HAS_CREDITS; |
| 2269 | } | 2379 | } |
| 2270 | 2380 | ||
| 2271 | kref_get(&rdata->refcount); | 2381 | kref_get(&rdata->refcount); |
| 2272 | rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, | 2382 | rc = cifs_call_async(io_parms.tcon->ses->server, &rqst, |
| 2273 | cifs_readv_receive, smb2_readv_callback, | 2383 | cifs_readv_receive, smb2_readv_callback, |
| 2274 | rdata, flags); | 2384 | smb3_handle_read_data, rdata, flags); |
| 2275 | if (rc) { | 2385 | if (rc) { |
| 2276 | kref_put(&rdata->refcount, cifs_readdata_release); | 2386 | kref_put(&rdata->refcount, cifs_readdata_release); |
| 2277 | cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); | 2387 | cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); |
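
Note also the small switch in smb2_async_readv (and smb2_async_writev below) from "flags = CIFS_HAS_CREDITS" to "flags |= CIFS_HAS_CREDITS": flags may already hold CIFS_TRANSFORM_REQ at that point, and a plain assignment would silently drop it. The general pitfall, with placeholder flag values:

#include <stdio.h>

#define FLAG_TRANSFORM   0x1   /* placeholder for CIFS_TRANSFORM_REQ */
#define FLAG_HAS_CREDITS 0x2   /* placeholder for CIFS_HAS_CREDITS   */

int main(void)
{
	int flags = FLAG_TRANSFORM;   /* set earlier, e.g. encrypted session */

	int wrong = flags;
	wrong = FLAG_HAS_CREDITS;     /* overwrites: transform bit is lost  */

	flags |= FLAG_HAS_CREDITS;    /* accumulates: both bits survive     */

	printf("wrong=0x%x ok=0x%x\n", wrong, flags);   /* wrong=0x2 ok=0x3 */
	return 0;
}
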
| @@ -2286,21 +2396,41 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2286 | unsigned int *nbytes, char **buf, int *buf_type) | 2396 | unsigned int *nbytes, char **buf, int *buf_type) |
| 2287 | { | 2397 | { |
| 2288 | int resp_buftype, rc = -EACCES; | 2398 | int resp_buftype, rc = -EACCES; |
| 2399 | struct smb2_read_plain_req *req = NULL; | ||
| 2289 | struct smb2_read_rsp *rsp = NULL; | 2400 | struct smb2_read_rsp *rsp = NULL; |
| 2290 | struct kvec iov[1]; | 2401 | struct smb2_sync_hdr *shdr; |
| 2402 | struct kvec iov[2]; | ||
| 2403 | struct kvec rsp_iov; | ||
| 2404 | unsigned int total_len; | ||
| 2405 | __be32 req_len; | ||
| 2406 | struct smb_rqst rqst = { .rq_iov = iov, | ||
| 2407 | .rq_nvec = 2 }; | ||
| 2408 | int flags = CIFS_LOG_ERROR; | ||
| 2409 | struct cifs_ses *ses = io_parms->tcon->ses; | ||
| 2291 | 2410 | ||
| 2292 | *nbytes = 0; | 2411 | *nbytes = 0; |
| 2293 | rc = smb2_new_read_req(iov, io_parms, 0, 0); | 2412 | rc = smb2_new_read_req((void **)&req, &total_len, io_parms, 0, 0); |
| 2294 | if (rc) | 2413 | if (rc) |
| 2295 | return rc; | 2414 | return rc; |
| 2296 | 2415 | ||
| 2297 | rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1, | 2416 | if (encryption_required(io_parms->tcon)) |
| 2298 | &resp_buftype, CIFS_LOG_ERROR); | 2417 | flags |= CIFS_TRANSFORM_REQ; |
| 2299 | 2418 | ||
| 2300 | rsp = (struct smb2_read_rsp *)iov[0].iov_base; | 2419 | req_len = cpu_to_be32(total_len); |
| 2301 | 2420 | ||
| 2302 | if (rsp->hdr.Status == STATUS_END_OF_FILE) { | 2421 | iov[0].iov_base = &req_len; |
| 2303 | free_rsp_buf(resp_buftype, iov[0].iov_base); | 2422 | iov[0].iov_len = sizeof(__be32); |
| 2423 | iov[1].iov_base = req; | ||
| 2424 | iov[1].iov_len = total_len; | ||
| 2425 | |||
| 2426 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); | ||
| 2427 | cifs_small_buf_release(req); | ||
| 2428 | |||
| 2429 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; | ||
| 2430 | shdr = get_sync_hdr(rsp); | ||
| 2431 | |||
| 2432 | if (shdr->Status == STATUS_END_OF_FILE) { | ||
| 2433 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); | ||
| 2304 | return 0; | 2434 | return 0; |
| 2305 | } | 2435 | } |
| 2306 | 2436 | ||
| @@ -2319,11 +2449,10 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2319 | } | 2449 | } |
| 2320 | 2450 | ||
| 2321 | if (*buf) { | 2451 | if (*buf) { |
| 2322 | memcpy(*buf, (char *)&rsp->hdr.ProtocolId + rsp->DataOffset, | 2452 | memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); |
| 2323 | *nbytes); | 2453 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
| 2324 | free_rsp_buf(resp_buftype, iov[0].iov_base); | ||
| 2325 | } else if (resp_buftype != CIFS_NO_BUFFER) { | 2454 | } else if (resp_buftype != CIFS_NO_BUFFER) { |
| 2326 | *buf = iov[0].iov_base; | 2455 | *buf = rsp_iov.iov_base; |
| 2327 | if (resp_buftype == CIFS_SMALL_BUFFER) | 2456 | if (resp_buftype == CIFS_SMALL_BUFFER) |
| 2328 | *buf_type = CIFS_SMALL_BUFFER; | 2457 | *buf_type = CIFS_SMALL_BUFFER; |
| 2329 | else if (resp_buftype == CIFS_LARGE_BUFFER) | 2458 | else if (resp_buftype == CIFS_LARGE_BUFFER) |
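
In SMB2_read above, the copied-out data is now located relative to the sync header returned by get_sync_hdr(), that is, start of the SMB2 header plus the response's DataOffset, instead of relative to rsp->hdr.ProtocolId. A tiny sketch of that pointer arithmetic over a fabricated response layout (not the real smb2_read_rsp):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fabricated response layout: a 64-byte header followed by payload, with
 * DataOffset counted from the start of the header, as in SMB2. */
struct fake_rsp {
	unsigned char hdr[64];
	uint16_t      DataOffset;   /* offset of payload from &hdr[0] */
	unsigned char payload[16];
};

int main(void)
{
	struct fake_rsp rsp;

	memset(&rsp, 0, sizeof(rsp));
	memcpy(rsp.payload, "hello", 5);
	rsp.DataOffset = (uint16_t)offsetof(struct fake_rsp, payload);

	/* data = header base + DataOffset, mirroring
	 * memcpy(*buf, (char *)shdr + le16_to_cpu(rsp->DataOffset), *nbytes) */
	const char *data = (const char *)&rsp + rsp.DataOffset;
	printf("%.5s\n", data);
	return 0;
}
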
| @@ -2348,7 +2477,7 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
| 2348 | 2477 | ||
| 2349 | switch (mid->mid_state) { | 2478 | switch (mid->mid_state) { |
| 2350 | case MID_RESPONSE_RECEIVED: | 2479 | case MID_RESPONSE_RECEIVED: |
| 2351 | credits_received = le16_to_cpu(rsp->hdr.CreditRequest); | 2480 | credits_received = le16_to_cpu(rsp->hdr.sync_hdr.CreditRequest); |
| 2352 | wdata->result = smb2_check_receive(mid, tcon->ses->server, 0); | 2481 | wdata->result = smb2_check_receive(mid, tcon->ses->server, 0); |
| 2353 | if (wdata->result != 0) | 2482 | if (wdata->result != 0) |
| 2354 | break; | 2483 | break; |
| @@ -2394,10 +2523,11 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
| 2394 | { | 2523 | { |
| 2395 | int rc = -EACCES, flags = 0; | 2524 | int rc = -EACCES, flags = 0; |
| 2396 | struct smb2_write_req *req = NULL; | 2525 | struct smb2_write_req *req = NULL; |
| 2526 | struct smb2_sync_hdr *shdr; | ||
| 2397 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | 2527 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); |
| 2398 | struct TCP_Server_Info *server = tcon->ses->server; | 2528 | struct TCP_Server_Info *server = tcon->ses->server; |
| 2399 | struct kvec iov; | 2529 | struct kvec iov[2]; |
| 2400 | struct smb_rqst rqst; | 2530 | struct smb_rqst rqst = { }; |
| 2401 | 2531 | ||
| 2402 | rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); | 2532 | rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); |
| 2403 | if (rc) { | 2533 | if (rc) { |
| @@ -2412,7 +2542,11 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
| 2412 | goto async_writev_out; | 2542 | goto async_writev_out; |
| 2413 | } | 2543 | } |
| 2414 | 2544 | ||
| 2415 | req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid); | 2545 | if (encryption_required(tcon)) |
| 2546 | flags |= CIFS_TRANSFORM_REQ; | ||
| 2547 | |||
| 2548 | shdr = get_sync_hdr(req); | ||
| 2549 | shdr->ProcessId = cpu_to_le32(wdata->cfile->pid); | ||
| 2416 | 2550 | ||
| 2417 | req->PersistentFileId = wdata->cfile->fid.persistent_fid; | 2551 | req->PersistentFileId = wdata->cfile->fid.persistent_fid; |
| 2418 | req->VolatileFileId = wdata->cfile->fid.volatile_fid; | 2552 | req->VolatileFileId = wdata->cfile->fid.volatile_fid; |
| @@ -2426,11 +2560,13 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
| 2426 | req->RemainingBytes = 0; | 2560 | req->RemainingBytes = 0; |
| 2427 | 2561 | ||
| 2428 | /* 4 for rfc1002 length field and 1 for Buffer */ | 2562 | /* 4 for rfc1002 length field and 1 for Buffer */ |
| 2429 | iov.iov_len = get_rfc1002_length(req) + 4 - 1; | 2563 | iov[0].iov_len = 4; |
| 2430 | iov.iov_base = req; | 2564 | iov[0].iov_base = req; |
| 2565 | iov[1].iov_len = get_rfc1002_length(req) - 1; | ||
| 2566 | iov[1].iov_base = (char *)req + 4; | ||
| 2431 | 2567 | ||
| 2432 | rqst.rq_iov = &iov; | 2568 | rqst.rq_iov = iov; |
| 2433 | rqst.rq_nvec = 1; | 2569 | rqst.rq_nvec = 2; |
| 2434 | rqst.rq_pages = wdata->pages; | 2570 | rqst.rq_pages = wdata->pages; |
| 2435 | rqst.rq_npages = wdata->nr_pages; | 2571 | rqst.rq_npages = wdata->nr_pages; |
| 2436 | rqst.rq_pagesz = wdata->pagesz; | 2572 | rqst.rq_pagesz = wdata->pagesz; |
| @@ -2444,20 +2580,20 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
| 2444 | inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); | 2580 | inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); |
| 2445 | 2581 | ||
| 2446 | if (wdata->credits) { | 2582 | if (wdata->credits) { |
| 2447 | req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, | 2583 | shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, |
| 2448 | SMB2_MAX_BUFFER_SIZE)); | 2584 | SMB2_MAX_BUFFER_SIZE)); |
| 2449 | req->hdr.CreditRequest = req->hdr.CreditCharge; | 2585 | shdr->CreditRequest = shdr->CreditCharge; |
| 2450 | spin_lock(&server->req_lock); | 2586 | spin_lock(&server->req_lock); |
| 2451 | server->credits += wdata->credits - | 2587 | server->credits += wdata->credits - |
| 2452 | le16_to_cpu(req->hdr.CreditCharge); | 2588 | le16_to_cpu(shdr->CreditCharge); |
| 2453 | spin_unlock(&server->req_lock); | 2589 | spin_unlock(&server->req_lock); |
| 2454 | wake_up(&server->request_q); | 2590 | wake_up(&server->request_q); |
| 2455 | flags = CIFS_HAS_CREDITS; | 2591 | flags |= CIFS_HAS_CREDITS; |
| 2456 | } | 2592 | } |
| 2457 | 2593 | ||
| 2458 | kref_get(&wdata->refcount); | 2594 | kref_get(&wdata->refcount); |
| 2459 | rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata, | 2595 | rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL, |
| 2460 | flags); | 2596 | wdata, flags); |
| 2461 | 2597 | ||
| 2462 | if (rc) { | 2598 | if (rc) { |
| 2463 | kref_put(&wdata->refcount, release); | 2599 | kref_put(&wdata->refcount, release); |
| @@ -2483,6 +2619,9 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2483 | struct smb2_write_req *req = NULL; | 2619 | struct smb2_write_req *req = NULL; |
| 2484 | struct smb2_write_rsp *rsp = NULL; | 2620 | struct smb2_write_rsp *rsp = NULL; |
| 2485 | int resp_buftype; | 2621 | int resp_buftype; |
| 2622 | struct kvec rsp_iov; | ||
| 2623 | int flags = 0; | ||
| 2624 | |||
| 2486 | *nbytes = 0; | 2625 | *nbytes = 0; |
| 2487 | 2626 | ||
| 2488 | if (n_vec < 1) | 2627 | if (n_vec < 1) |
| @@ -2495,7 +2634,10 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2495 | if (io_parms->tcon->ses->server == NULL) | 2634 | if (io_parms->tcon->ses->server == NULL) |
| 2496 | return -ECONNABORTED; | 2635 | return -ECONNABORTED; |
| 2497 | 2636 | ||
| 2498 | req->hdr.ProcessId = cpu_to_le32(io_parms->pid); | 2637 | if (encryption_required(io_parms->tcon)) |
| 2638 | flags |= CIFS_TRANSFORM_REQ; | ||
| 2639 | |||
| 2640 | req->hdr.sync_hdr.ProcessId = cpu_to_le32(io_parms->pid); | ||
| 2499 | 2641 | ||
| 2500 | req->PersistentFileId = io_parms->persistent_fid; | 2642 | req->PersistentFileId = io_parms->persistent_fid; |
| 2501 | req->VolatileFileId = io_parms->volatile_fid; | 2643 | req->VolatileFileId = io_parms->volatile_fid; |
| @@ -2517,8 +2659,9 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2517 | inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); | 2659 | inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); |
| 2518 | 2660 | ||
| 2519 | rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1, | 2661 | rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1, |
| 2520 | &resp_buftype, 0); | 2662 | &resp_buftype, flags, &rsp_iov); |
| 2521 | rsp = (struct smb2_write_rsp *)iov[0].iov_base; | 2663 | cifs_small_buf_release(req); |
| 2664 | rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; | ||
| 2522 | 2665 | ||
| 2523 | if (rc) { | 2666 | if (rc) { |
| 2524 | cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); | 2667 | cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE); |
| @@ -2581,6 +2724,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2581 | struct smb2_query_directory_req *req; | 2724 | struct smb2_query_directory_req *req; |
| 2582 | struct smb2_query_directory_rsp *rsp = NULL; | 2725 | struct smb2_query_directory_rsp *rsp = NULL; |
| 2583 | struct kvec iov[2]; | 2726 | struct kvec iov[2]; |
| 2727 | struct kvec rsp_iov; | ||
| 2584 | int rc = 0; | 2728 | int rc = 0; |
| 2585 | int len; | 2729 | int len; |
| 2586 | int resp_buftype = CIFS_NO_BUFFER; | 2730 | int resp_buftype = CIFS_NO_BUFFER; |
| @@ -2591,6 +2735,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2591 | char *end_of_smb; | 2735 | char *end_of_smb; |
| 2592 | unsigned int output_size = CIFSMaxBufSize; | 2736 | unsigned int output_size = CIFSMaxBufSize; |
| 2593 | size_t info_buf_size; | 2737 | size_t info_buf_size; |
| 2738 | int flags = 0; | ||
| 2594 | 2739 | ||
| 2595 | if (ses && (ses->server)) | 2740 | if (ses && (ses->server)) |
| 2596 | server = ses->server; | 2741 | server = ses->server; |
| @@ -2601,6 +2746,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2601 | if (rc) | 2746 | if (rc) |
| 2602 | return rc; | 2747 | return rc; |
| 2603 | 2748 | ||
| 2749 | if (encryption_required(tcon)) | ||
| 2750 | flags |= CIFS_TRANSFORM_REQ; | ||
| 2751 | |||
| 2604 | switch (srch_inf->info_level) { | 2752 | switch (srch_inf->info_level) { |
| 2605 | case SMB_FIND_FILE_DIRECTORY_INFO: | 2753 | case SMB_FIND_FILE_DIRECTORY_INFO: |
| 2606 | req->FileInformationClass = FILE_DIRECTORY_INFORMATION; | 2754 | req->FileInformationClass = FILE_DIRECTORY_INFORMATION; |
| @@ -2645,11 +2793,13 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2645 | 2793 | ||
| 2646 | inc_rfc1001_len(req, len - 1 /* Buffer */); | 2794 | inc_rfc1001_len(req, len - 1 /* Buffer */); |
| 2647 | 2795 | ||
| 2648 | rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0); | 2796 | rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); |
| 2649 | rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base; | 2797 | cifs_small_buf_release(req); |
| 2798 | rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; | ||
| 2650 | 2799 | ||
| 2651 | if (rc) { | 2800 | if (rc) { |
| 2652 | if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) { | 2801 | if (rc == -ENODATA && |
| 2802 | rsp->hdr.sync_hdr.Status == STATUS_NO_MORE_FILES) { | ||
| 2653 | srch_inf->endOfSearch = true; | 2803 | srch_inf->endOfSearch = true; |
| 2654 | rc = 0; | 2804 | rc = 0; |
| 2655 | } | 2805 | } |
| @@ -2705,11 +2855,13 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2705 | struct smb2_set_info_req *req; | 2855 | struct smb2_set_info_req *req; |
| 2706 | struct smb2_set_info_rsp *rsp = NULL; | 2856 | struct smb2_set_info_rsp *rsp = NULL; |
| 2707 | struct kvec *iov; | 2857 | struct kvec *iov; |
| 2858 | struct kvec rsp_iov; | ||
| 2708 | int rc = 0; | 2859 | int rc = 0; |
| 2709 | int resp_buftype; | 2860 | int resp_buftype; |
| 2710 | unsigned int i; | 2861 | unsigned int i; |
| 2711 | struct TCP_Server_Info *server; | 2862 | struct TCP_Server_Info *server; |
| 2712 | struct cifs_ses *ses = tcon->ses; | 2863 | struct cifs_ses *ses = tcon->ses; |
| 2864 | int flags = 0; | ||
| 2713 | 2865 | ||
| 2714 | if (ses && (ses->server)) | 2866 | if (ses && (ses->server)) |
| 2715 | server = ses->server; | 2867 | server = ses->server; |
| @@ -2729,7 +2881,10 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2729 | return rc; | 2881 | return rc; |
| 2730 | } | 2882 | } |
| 2731 | 2883 | ||
| 2732 | req->hdr.ProcessId = cpu_to_le32(pid); | 2884 | if (encryption_required(tcon)) |
| 2885 | flags |= CIFS_TRANSFORM_REQ; | ||
| 2886 | |||
| 2887 | req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); | ||
| 2733 | 2888 | ||
| 2734 | req->InfoType = SMB2_O_INFO_FILE; | 2889 | req->InfoType = SMB2_O_INFO_FILE; |
| 2735 | req->FileInfoClass = info_class; | 2890 | req->FileInfoClass = info_class; |
| @@ -2756,8 +2911,9 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2756 | iov[i].iov_len = size[i]; | 2911 | iov[i].iov_len = size[i]; |
| 2757 | } | 2912 | } |
| 2758 | 2913 | ||
| 2759 | rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0); | 2914 | rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); |
| 2760 | rsp = (struct smb2_set_info_rsp *)iov[0].iov_base; | 2915 | cifs_small_buf_release(req); |
| 2916 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; | ||
| 2761 | 2917 | ||
| 2762 | if (rc != 0) | 2918 | if (rc != 0) |
| 2763 | cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); | 2919 | cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE); |
| @@ -2885,20 +3041,23 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2885 | { | 3041 | { |
| 2886 | int rc; | 3042 | int rc; |
| 2887 | struct smb2_oplock_break *req = NULL; | 3043 | struct smb2_oplock_break *req = NULL; |
| 3044 | int flags = CIFS_OBREAK_OP; | ||
| 2888 | 3045 | ||
| 2889 | cifs_dbg(FYI, "SMB2_oplock_break\n"); | 3046 | cifs_dbg(FYI, "SMB2_oplock_break\n"); |
| 2890 | rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); | 3047 | rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); |
| 2891 | |||
| 2892 | if (rc) | 3048 | if (rc) |
| 2893 | return rc; | 3049 | return rc; |
| 2894 | 3050 | ||
| 3051 | if (encryption_required(tcon)) | ||
| 3052 | flags |= CIFS_TRANSFORM_REQ; | ||
| 3053 | |||
| 2895 | req->VolatileFid = volatile_fid; | 3054 | req->VolatileFid = volatile_fid; |
| 2896 | req->PersistentFid = persistent_fid; | 3055 | req->PersistentFid = persistent_fid; |
| 2897 | req->OplockLevel = oplock_level; | 3056 | req->OplockLevel = oplock_level; |
| 2898 | req->hdr.CreditRequest = cpu_to_le16(1); | 3057 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); |
| 2899 | 3058 | ||
| 2900 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP); | 3059 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); |
| 2901 | /* SMB2 buffer freed by function above */ | 3060 | cifs_small_buf_release(req); |
| 2902 | 3061 | ||
| 2903 | if (rc) { | 3062 | if (rc) { |
| 2904 | cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); | 3063 | cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); |
| @@ -2958,10 +3117,12 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2958 | { | 3117 | { |
| 2959 | struct smb2_query_info_rsp *rsp = NULL; | 3118 | struct smb2_query_info_rsp *rsp = NULL; |
| 2960 | struct kvec iov; | 3119 | struct kvec iov; |
| 3120 | struct kvec rsp_iov; | ||
| 2961 | int rc = 0; | 3121 | int rc = 0; |
| 2962 | int resp_buftype; | 3122 | int resp_buftype; |
| 2963 | struct cifs_ses *ses = tcon->ses; | 3123 | struct cifs_ses *ses = tcon->ses; |
| 2964 | struct smb2_fs_full_size_info *info = NULL; | 3124 | struct smb2_fs_full_size_info *info = NULL; |
| 3125 | int flags = 0; | ||
| 2965 | 3126 | ||
| 2966 | rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION, | 3127 | rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION, |
| 2967 | sizeof(struct smb2_fs_full_size_info), | 3128 | sizeof(struct smb2_fs_full_size_info), |
| @@ -2969,12 +3130,16 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2969 | if (rc) | 3130 | if (rc) |
| 2970 | return rc; | 3131 | return rc; |
| 2971 | 3132 | ||
| 2972 | rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0); | 3133 | if (encryption_required(tcon)) |
| 3134 | flags |= CIFS_TRANSFORM_REQ; | ||
| 3135 | |||
| 3136 | rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); | ||
| 3137 | cifs_small_buf_release(iov.iov_base); | ||
| 2973 | if (rc) { | 3138 | if (rc) { |
| 2974 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); | 3139 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); |
| 2975 | goto qfsinf_exit; | 3140 | goto qfsinf_exit; |
| 2976 | } | 3141 | } |
| 2977 | rsp = (struct smb2_query_info_rsp *)iov.iov_base; | 3142 | rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; |
| 2978 | 3143 | ||
| 2979 | info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ + | 3144 | info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ + |
| 2980 | le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr); | 3145 | le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr); |
| @@ -2985,7 +3150,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2985 | copy_fs_info_to_kstatfs(info, fsdata); | 3150 | copy_fs_info_to_kstatfs(info, fsdata); |
| 2986 | 3151 | ||
| 2987 | qfsinf_exit: | 3152 | qfsinf_exit: |
| 2988 | free_rsp_buf(resp_buftype, iov.iov_base); | 3153 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
| 2989 | return rc; | 3154 | return rc; |
| 2990 | } | 3155 | } |
| 2991 | 3156 | ||
| @@ -2995,10 +3160,12 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 2995 | { | 3160 | { |
| 2996 | struct smb2_query_info_rsp *rsp = NULL; | 3161 | struct smb2_query_info_rsp *rsp = NULL; |
| 2997 | struct kvec iov; | 3162 | struct kvec iov; |
| 3163 | struct kvec rsp_iov; | ||
| 2998 | int rc = 0; | 3164 | int rc = 0; |
| 2999 | int resp_buftype, max_len, min_len; | 3165 | int resp_buftype, max_len, min_len; |
| 3000 | struct cifs_ses *ses = tcon->ses; | 3166 | struct cifs_ses *ses = tcon->ses; |
| 3001 | unsigned int rsp_len, offset; | 3167 | unsigned int rsp_len, offset; |
| 3168 | int flags = 0; | ||
| 3002 | 3169 | ||
| 3003 | if (level == FS_DEVICE_INFORMATION) { | 3170 | if (level == FS_DEVICE_INFORMATION) { |
| 3004 | max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); | 3171 | max_len = sizeof(FILE_SYSTEM_DEVICE_INFO); |
| @@ -3019,12 +3186,16 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 3019 | if (rc) | 3186 | if (rc) |
| 3020 | return rc; | 3187 | return rc; |
| 3021 | 3188 | ||
| 3022 | rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0); | 3189 | if (encryption_required(tcon)) |
| 3190 | flags |= CIFS_TRANSFORM_REQ; | ||
| 3191 | |||
| 3192 | rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); | ||
| 3193 | cifs_small_buf_release(iov.iov_base); | ||
| 3023 | if (rc) { | 3194 | if (rc) { |
| 3024 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); | 3195 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); |
| 3025 | goto qfsattr_exit; | 3196 | goto qfsattr_exit; |
| 3026 | } | 3197 | } |
| 3027 | rsp = (struct smb2_query_info_rsp *)iov.iov_base; | 3198 | rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; |
| 3028 | 3199 | ||
| 3029 | rsp_len = le32_to_cpu(rsp->OutputBufferLength); | 3200 | rsp_len = le32_to_cpu(rsp->OutputBufferLength); |
| 3030 | offset = le16_to_cpu(rsp->OutputBufferOffset); | 3201 | offset = le16_to_cpu(rsp->OutputBufferOffset); |
| @@ -3048,7 +3219,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 3048 | } | 3219 | } |
| 3049 | 3220 | ||
| 3050 | qfsattr_exit: | 3221 | qfsattr_exit: |
| 3051 | free_rsp_buf(resp_buftype, iov.iov_base); | 3222 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
| 3052 | return rc; | 3223 | return rc; |
| 3053 | } | 3224 | } |
| 3054 | 3225 | ||
| @@ -3060,8 +3231,10 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 3060 | int rc = 0; | 3231 | int rc = 0; |
| 3061 | struct smb2_lock_req *req = NULL; | 3232 | struct smb2_lock_req *req = NULL; |
| 3062 | struct kvec iov[2]; | 3233 | struct kvec iov[2]; |
| 3234 | struct kvec rsp_iov; | ||
| 3063 | int resp_buf_type; | 3235 | int resp_buf_type; |
| 3064 | unsigned int count; | 3236 | unsigned int count; |
| 3237 | int flags = CIFS_NO_RESP; | ||
| 3065 | 3238 | ||
| 3066 | cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); | 3239 | cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); |
| 3067 | 3240 | ||
| @@ -3069,7 +3242,10 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 3069 | if (rc) | 3242 | if (rc) |
| 3070 | return rc; | 3243 | return rc; |
| 3071 | 3244 | ||
| 3072 | req->hdr.ProcessId = cpu_to_le32(pid); | 3245 | if (encryption_required(tcon)) |
| 3246 | flags |= CIFS_TRANSFORM_REQ; | ||
| 3247 | |||
| 3248 | req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); | ||
| 3073 | req->LockCount = cpu_to_le16(num_lock); | 3249 | req->LockCount = cpu_to_le16(num_lock); |
| 3074 | 3250 | ||
| 3075 | req->PersistentFileId = persist_fid; | 3251 | req->PersistentFileId = persist_fid; |
| @@ -3085,7 +3261,9 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 3085 | iov[1].iov_len = count; | 3261 | iov[1].iov_len = count; |
| 3086 | 3262 | ||
| 3087 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); | 3263 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); |
| 3088 | rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP); | 3264 | rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, flags, |
| 3265 | &rsp_iov); | ||
| 3266 | cifs_small_buf_release(req); | ||
| 3089 | if (rc) { | 3267 | if (rc) { |
| 3090 | cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); | 3268 | cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); |
| 3091 | cifs_stats_fail_inc(tcon, SMB2_LOCK_HE); | 3269 | cifs_stats_fail_inc(tcon, SMB2_LOCK_HE); |
| @@ -3117,22 +3295,25 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 3117 | { | 3295 | { |
| 3118 | int rc; | 3296 | int rc; |
| 3119 | struct smb2_lease_ack *req = NULL; | 3297 | struct smb2_lease_ack *req = NULL; |
| 3298 | int flags = CIFS_OBREAK_OP; | ||
| 3120 | 3299 | ||
| 3121 | cifs_dbg(FYI, "SMB2_lease_break\n"); | 3300 | cifs_dbg(FYI, "SMB2_lease_break\n"); |
| 3122 | rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); | 3301 | rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); |
| 3123 | |||
| 3124 | if (rc) | 3302 | if (rc) |
| 3125 | return rc; | 3303 | return rc; |
| 3126 | 3304 | ||
| 3127 | req->hdr.CreditRequest = cpu_to_le16(1); | 3305 | if (encryption_required(tcon)) |
| 3306 | flags |= CIFS_TRANSFORM_REQ; | ||
| 3307 | |||
| 3308 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); | ||
| 3128 | req->StructureSize = cpu_to_le16(36); | 3309 | req->StructureSize = cpu_to_le16(36); |
| 3129 | inc_rfc1001_len(req, 12); | 3310 | inc_rfc1001_len(req, 12); |
| 3130 | 3311 | ||
| 3131 | memcpy(req->LeaseKey, lease_key, 16); | 3312 | memcpy(req->LeaseKey, lease_key, 16); |
| 3132 | req->LeaseState = lease_state; | 3313 | req->LeaseState = lease_state; |
| 3133 | 3314 | ||
| 3134 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP); | 3315 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); |
| 3135 | /* SMB2 buffer freed by function above */ | 3316 | cifs_small_buf_release(req); |
| 3136 | 3317 | ||
| 3137 | if (rc) { | 3318 | if (rc) { |
| 3138 | cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); | 3319 | cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index dc0d141f33e2..c03b252501a1 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
| @@ -101,10 +101,7 @@ | |||
| 101 | 101 | ||
| 102 | #define SMB2_HEADER_STRUCTURE_SIZE cpu_to_le16(64) | 102 | #define SMB2_HEADER_STRUCTURE_SIZE cpu_to_le16(64) |
| 103 | 103 | ||
| 104 | struct smb2_hdr { | 104 | struct smb2_sync_hdr { |
| 105 | __be32 smb2_buf_length; /* big endian on wire */ | ||
| 106 | /* length is only two or three bytes - with | ||
| 107 | one or two byte type preceding it that MBZ */ | ||
| 108 | __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */ | 105 | __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */ |
| 109 | __le16 StructureSize; /* 64 */ | 106 | __le16 StructureSize; /* 64 */ |
| 110 | __le16 CreditCharge; /* MBZ */ | 107 | __le16 CreditCharge; /* MBZ */ |
| @@ -120,16 +117,31 @@ struct smb2_hdr { | |||
| 120 | __u8 Signature[16]; | 117 | __u8 Signature[16]; |
| 121 | } __packed; | 118 | } __packed; |
| 122 | 119 | ||
| 120 | struct smb2_sync_pdu { | ||
| 121 | struct smb2_sync_hdr sync_hdr; | ||
| 122 | __le16 StructureSize2; /* size of wct area (varies, request specific) */ | ||
| 123 | } __packed; | ||
| 124 | |||
| 125 | struct smb2_hdr { | ||
| 126 | __be32 smb2_buf_length; /* big endian on wire */ | ||
| 127 | /* length is only two or three bytes - with */ | ||
| 128 | /* one or two byte type preceding it that MBZ */ | ||
| 129 | struct smb2_sync_hdr sync_hdr; | ||
| 130 | } __packed; | ||
| 131 | |||
| 123 | struct smb2_pdu { | 132 | struct smb2_pdu { |
| 124 | struct smb2_hdr hdr; | 133 | struct smb2_hdr hdr; |
| 125 | __le16 StructureSize2; /* size of wct area (varies, request specific) */ | 134 | __le16 StructureSize2; /* size of wct area (varies, request specific) */ |
| 126 | } __packed; | 135 | } __packed; |
| 127 | 136 | ||
| 137 | #define SMB3_AES128CMM_NONCE 11 | ||
| 138 | #define SMB3_AES128GCM_NONCE 12 | ||
| 139 | |||
| 128 | struct smb2_transform_hdr { | 140 | struct smb2_transform_hdr { |
| 129 | __be32 smb2_buf_length; /* big endian on wire */ | 141 | __be32 smb2_buf_length; /* big endian on wire */ |
| 130 | /* length is only two or three bytes - with | 142 | /* length is only two or three bytes - with |
| 131 | one or two byte type preceding it that MBZ */ | 143 | one or two byte type preceding it that MBZ */ |
| 132 | __u8 ProtocolId[4]; /* 0xFD 'S' 'M' 'B' */ | 144 | __le32 ProtocolId; /* 0xFD 'S' 'M' 'B' */ |
| 133 | __u8 Signature[16]; | 145 | __u8 Signature[16]; |
| 134 | __u8 Nonce[16]; | 146 | __u8 Nonce[16]; |
| 135 | __le32 OriginalMessageSize; | 147 | __le32 OriginalMessageSize; |
| @@ -814,8 +826,9 @@ struct smb2_flush_rsp { | |||
| 814 | #define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ | 826 | #define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ |
| 815 | #define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */ | 827 | #define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */ |
| 816 | 828 | ||
| 817 | struct smb2_read_req { | 829 | /* SMB2 read request without RFC1001 length at the beginning */ |
| 818 | struct smb2_hdr hdr; | 830 | struct smb2_read_plain_req { |
| 831 | struct smb2_sync_hdr sync_hdr; | ||
| 819 | __le16 StructureSize; /* Must be 49 */ | 832 | __le16 StructureSize; /* Must be 49 */ |
| 820 | __u8 Padding; /* offset from start of SMB2 header to place read */ | 833 | __u8 Padding; /* offset from start of SMB2 header to place read */ |
| 821 | __u8 Flags; /* MBZ unless SMB3.02 or later */ | 834 | __u8 Flags; /* MBZ unless SMB3.02 or later */ |
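The smb2pdu.h hunk above splits the old smb2_hdr into a wire-format smb2_sync_hdr plus a thin wrapper that keeps the 4-byte RFC1001 length for legacy callers. A minimal userspace sketch of that layout, not part of the patch — field names roughly follow the kernel struct, types are simplified to stdint:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct sync_hdr_sketch {		/* mirrors smb2_sync_hdr: 64 bytes on the wire */
		uint32_t ProtocolId;		/* 0xFE 'S' 'M' 'B' */
		uint16_t StructureSize;		/* 64 */
		uint16_t CreditCharge;
		uint32_t Status;
		uint16_t Command;
		uint16_t CreditRequest;
		uint32_t Flags;
		uint32_t NextCommand;
		uint64_t MessageId;
		uint32_t ProcessId;
		uint32_t TreeId;
		uint64_t SessionId;
		uint8_t  Signature[16];
	} __attribute__((packed));

	struct hdr_sketch {			/* mirrors smb2_hdr: RFC1001 length + sync header */
		uint32_t smb2_buf_length;	/* big endian on wire */
		struct sync_hdr_sketch sync_hdr;
	} __attribute__((packed));

	int main(void)
	{
		assert(sizeof(struct sync_hdr_sketch) == 64);
		/* the wire header starts right after the 4-byte length prefix */
		assert(offsetof(struct hdr_sketch, sync_hdr) == 4);
		return 0;
	}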
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index f2d511a6971b..85fc7a789334 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
| @@ -56,6 +56,10 @@ extern void smb2_echo_request(struct work_struct *work); | |||
| 56 | extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode); | 56 | extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode); |
| 57 | extern bool smb2_is_valid_oplock_break(char *buffer, | 57 | extern bool smb2_is_valid_oplock_break(char *buffer, |
| 58 | struct TCP_Server_Info *srv); | 58 | struct TCP_Server_Info *srv); |
| 59 | extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server, | ||
| 60 | __u64 ses_id); | ||
| 61 | extern int smb3_handle_read_data(struct TCP_Server_Info *server, | ||
| 62 | struct mid_q_entry *mid); | ||
| 59 | 63 | ||
| 60 | extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst, | 64 | extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst, |
| 61 | struct smb2_file_all_info *src); | 65 | struct smb2_file_all_info *src); |
| @@ -97,6 +101,7 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile, | |||
| 97 | struct file_lock *flock, const unsigned int xid); | 101 | struct file_lock *flock, const unsigned int xid); |
| 98 | extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); | 102 | extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile); |
| 99 | extern void smb2_reconnect_server(struct work_struct *work); | 103 | extern void smb2_reconnect_server(struct work_struct *work); |
| 104 | extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server); | ||
| 100 | 105 | ||
| 101 | /* | 106 | /* |
| 102 | * SMB2 Worker functions - most of protocol specific implementation details | 107 | * SMB2 Worker functions - most of protocol specific implementation details |
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index bc9a7b634643..7c3bb1bd7eed 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
| 32 | #include <linux/mempool.h> | 32 | #include <linux/mempool.h> |
| 33 | #include <linux/highmem.h> | 33 | #include <linux/highmem.h> |
| 34 | #include <crypto/aead.h> | ||
| 34 | #include "smb2pdu.h" | 35 | #include "smb2pdu.h" |
| 35 | #include "cifsglob.h" | 36 | #include "cifsglob.h" |
| 36 | #include "cifsproto.h" | 37 | #include "cifsproto.h" |
| @@ -114,14 +115,14 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) | |||
| 114 | return 0; | 115 | return 0; |
| 115 | } | 116 | } |
| 116 | 117 | ||
| 117 | static struct cifs_ses * | 118 | struct cifs_ses * |
| 118 | smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server) | 119 | smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) |
| 119 | { | 120 | { |
| 120 | struct cifs_ses *ses; | 121 | struct cifs_ses *ses; |
| 121 | 122 | ||
| 122 | spin_lock(&cifs_tcp_ses_lock); | 123 | spin_lock(&cifs_tcp_ses_lock); |
| 123 | list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { | 124 | list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { |
| 124 | if (ses->Suid != smb2hdr->SessionId) | 125 | if (ses->Suid != ses_id) |
| 125 | continue; | 126 | continue; |
| 126 | spin_unlock(&cifs_tcp_ses_lock); | 127 | spin_unlock(&cifs_tcp_ses_lock); |
| 127 | return ses; | 128 | return ses; |
| @@ -131,7 +132,6 @@ smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server) | |||
| 131 | return NULL; | 132 | return NULL; |
| 132 | } | 133 | } |
| 133 | 134 | ||
| 134 | |||
| 135 | int | 135 | int |
| 136 | smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | 136 | smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) |
| 137 | { | 137 | { |
| @@ -139,17 +139,17 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 139 | unsigned char smb2_signature[SMB2_HMACSHA256_SIZE]; | 139 | unsigned char smb2_signature[SMB2_HMACSHA256_SIZE]; |
| 140 | unsigned char *sigptr = smb2_signature; | 140 | unsigned char *sigptr = smb2_signature; |
| 141 | struct kvec *iov = rqst->rq_iov; | 141 | struct kvec *iov = rqst->rq_iov; |
| 142 | struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; | 142 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[1].iov_base; |
| 143 | struct cifs_ses *ses; | 143 | struct cifs_ses *ses; |
| 144 | 144 | ||
| 145 | ses = smb2_find_smb_ses(smb2_pdu, server); | 145 | ses = smb2_find_smb_ses(server, shdr->SessionId); |
| 146 | if (!ses) { | 146 | if (!ses) { |
| 147 | cifs_dbg(VFS, "%s: Could not find session\n", __func__); | 147 | cifs_dbg(VFS, "%s: Could not find session\n", __func__); |
| 148 | return 0; | 148 | return 0; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE); | 151 | memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE); |
| 152 | memset(smb2_pdu->Signature, 0x0, SMB2_SIGNATURE_SIZE); | 152 | memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); |
| 153 | 153 | ||
| 154 | rc = smb2_crypto_shash_allocate(server); | 154 | rc = smb2_crypto_shash_allocate(server); |
| 155 | if (rc) { | 155 | if (rc) { |
| @@ -174,7 +174,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 174 | &server->secmech.sdeschmacsha256->shash); | 174 | &server->secmech.sdeschmacsha256->shash); |
| 175 | 175 | ||
| 176 | if (!rc) | 176 | if (!rc) |
| 177 | memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); | 177 | memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); |
| 178 | 178 | ||
| 179 | return rc; | 179 | return rc; |
| 180 | } | 180 | } |
| @@ -356,17 +356,17 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 356 | unsigned char smb3_signature[SMB2_CMACAES_SIZE]; | 356 | unsigned char smb3_signature[SMB2_CMACAES_SIZE]; |
| 357 | unsigned char *sigptr = smb3_signature; | 357 | unsigned char *sigptr = smb3_signature; |
| 358 | struct kvec *iov = rqst->rq_iov; | 358 | struct kvec *iov = rqst->rq_iov; |
| 359 | struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; | 359 | struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[1].iov_base; |
| 360 | struct cifs_ses *ses; | 360 | struct cifs_ses *ses; |
| 361 | 361 | ||
| 362 | ses = smb2_find_smb_ses(smb2_pdu, server); | 362 | ses = smb2_find_smb_ses(server, shdr->SessionId); |
| 363 | if (!ses) { | 363 | if (!ses) { |
| 364 | cifs_dbg(VFS, "%s: Could not find session\n", __func__); | 364 | cifs_dbg(VFS, "%s: Could not find session\n", __func__); |
| 365 | return 0; | 365 | return 0; |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE); | 368 | memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE); |
| 369 | memset(smb2_pdu->Signature, 0x0, SMB2_SIGNATURE_SIZE); | 369 | memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE); |
| 370 | 370 | ||
| 371 | rc = crypto_shash_setkey(server->secmech.cmacaes, | 371 | rc = crypto_shash_setkey(server->secmech.cmacaes, |
| 372 | ses->smb3signingkey, SMB2_CMACAES_SIZE); | 372 | ses->smb3signingkey, SMB2_CMACAES_SIZE); |
| @@ -391,7 +391,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 391 | &server->secmech.sdesccmacaes->shash); | 391 | &server->secmech.sdesccmacaes->shash); |
| 392 | 392 | ||
| 393 | if (!rc) | 393 | if (!rc) |
| 394 | memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); | 394 | memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE); |
| 395 | 395 | ||
| 396 | return rc; | 396 | return rc; |
| 397 | } | 397 | } |
| @@ -401,14 +401,15 @@ static int | |||
| 401 | smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) | 401 | smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) |
| 402 | { | 402 | { |
| 403 | int rc = 0; | 403 | int rc = 0; |
| 404 | struct smb2_hdr *smb2_pdu = rqst->rq_iov[0].iov_base; | 404 | struct smb2_sync_hdr *shdr = |
| 405 | (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base; | ||
| 405 | 406 | ||
| 406 | if (!(smb2_pdu->Flags & SMB2_FLAGS_SIGNED) || | 407 | if (!(shdr->Flags & SMB2_FLAGS_SIGNED) || |
| 407 | server->tcpStatus == CifsNeedNegotiate) | 408 | server->tcpStatus == CifsNeedNegotiate) |
| 408 | return rc; | 409 | return rc; |
| 409 | 410 | ||
| 410 | if (!server->session_estab) { | 411 | if (!server->session_estab) { |
| 411 | strncpy(smb2_pdu->Signature, "BSRSPYL", 8); | 412 | strncpy(shdr->Signature, "BSRSPYL", 8); |
| 412 | return rc; | 413 | return rc; |
| 413 | } | 414 | } |
| 414 | 415 | ||
| @@ -422,11 +423,12 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 422 | { | 423 | { |
| 423 | unsigned int rc; | 424 | unsigned int rc; |
| 424 | char server_response_sig[16]; | 425 | char server_response_sig[16]; |
| 425 | struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; | 426 | struct smb2_sync_hdr *shdr = |
| 427 | (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base; | ||
| 426 | 428 | ||
| 427 | if ((smb2_pdu->Command == SMB2_NEGOTIATE) || | 429 | if ((shdr->Command == SMB2_NEGOTIATE) || |
| 428 | (smb2_pdu->Command == SMB2_SESSION_SETUP) || | 430 | (shdr->Command == SMB2_SESSION_SETUP) || |
| 429 | (smb2_pdu->Command == SMB2_OPLOCK_BREAK) || | 431 | (shdr->Command == SMB2_OPLOCK_BREAK) || |
| 430 | (!server->session_estab)) | 432 | (!server->session_estab)) |
| 431 | return 0; | 433 | return 0; |
| 432 | 434 | ||
| @@ -436,17 +438,17 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 436 | */ | 438 | */ |
| 437 | 439 | ||
| 438 | /* Do not need to verify session setups with signature "BSRSPYL " */ | 440 | /* Do not need to verify session setups with signature "BSRSPYL " */ |
| 439 | if (memcmp(smb2_pdu->Signature, "BSRSPYL ", 8) == 0) | 441 | if (memcmp(shdr->Signature, "BSRSPYL ", 8) == 0) |
| 440 | cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n", | 442 | cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n", |
| 441 | smb2_pdu->Command); | 443 | shdr->Command); |
| 442 | 444 | ||
| 443 | /* | 445 | /* |
| 444 | * Save off the original signature so we can modify the smb and check | 446 | * Save off the original signature so we can modify the smb and check |

| 445 | * our calculated signature against what the server sent. | 447 | * our calculated signature against what the server sent. |
| 446 | */ | 448 | */ |
| 447 | memcpy(server_response_sig, smb2_pdu->Signature, SMB2_SIGNATURE_SIZE); | 449 | memcpy(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE); |
| 448 | 450 | ||
| 449 | memset(smb2_pdu->Signature, 0, SMB2_SIGNATURE_SIZE); | 451 | memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE); |
| 450 | 452 | ||
| 451 | mutex_lock(&server->srv_mutex); | 453 | mutex_lock(&server->srv_mutex); |
| 452 | rc = server->ops->calc_signature(rqst, server); | 454 | rc = server->ops->calc_signature(rqst, server); |
| @@ -455,8 +457,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 455 | if (rc) | 457 | if (rc) |
| 456 | return rc; | 458 | return rc; |
| 457 | 459 | ||
| 458 | if (memcmp(server_response_sig, smb2_pdu->Signature, | 460 | if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) |
| 459 | SMB2_SIGNATURE_SIZE)) | ||
| 460 | return -EACCES; | 461 | return -EACCES; |
| 461 | else | 462 | else |
| 462 | return 0; | 463 | return 0; |
| @@ -467,18 +468,19 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | |||
| 467 | * and when srv_mutex is held. | 468 | * and when srv_mutex is held. |
| 468 | */ | 469 | */ |
| 469 | static inline void | 470 | static inline void |
| 470 | smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *hdr) | 471 | smb2_seq_num_into_buf(struct TCP_Server_Info *server, |
| 472 | struct smb2_sync_hdr *shdr) | ||
| 471 | { | 473 | { |
| 472 | unsigned int i, num = le16_to_cpu(hdr->CreditCharge); | 474 | unsigned int i, num = le16_to_cpu(shdr->CreditCharge); |
| 473 | 475 | ||
| 474 | hdr->MessageId = get_next_mid64(server); | 476 | shdr->MessageId = get_next_mid64(server); |
| 475 | /* skip message numbers according to CreditCharge field */ | 477 | /* skip message numbers according to CreditCharge field */ |
| 476 | for (i = 1; i < num; i++) | 478 | for (i = 1; i < num; i++) |
| 477 | get_next_mid(server); | 479 | get_next_mid(server); |
| 478 | } | 480 | } |
| 479 | 481 | ||
| 480 | static struct mid_q_entry * | 482 | static struct mid_q_entry * |
| 481 | smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer, | 483 | smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr, |
| 482 | struct TCP_Server_Info *server) | 484 | struct TCP_Server_Info *server) |
| 483 | { | 485 | { |
| 484 | struct mid_q_entry *temp; | 486 | struct mid_q_entry *temp; |
| @@ -493,9 +495,9 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer, | |||
| 493 | return temp; | 495 | return temp; |
| 494 | else { | 496 | else { |
| 495 | memset(temp, 0, sizeof(struct mid_q_entry)); | 497 | memset(temp, 0, sizeof(struct mid_q_entry)); |
| 496 | temp->mid = le64_to_cpu(smb_buffer->MessageId); | 498 | temp->mid = le64_to_cpu(shdr->MessageId); |
| 497 | temp->pid = current->pid; | 499 | temp->pid = current->pid; |
| 498 | temp->command = smb_buffer->Command; /* Always LE */ | 500 | temp->command = shdr->Command; /* Always LE */ |
| 499 | temp->when_alloc = jiffies; | 501 | temp->when_alloc = jiffies; |
| 500 | temp->server = server; | 502 | temp->server = server; |
| 501 | 503 | ||
| @@ -513,7 +515,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer, | |||
| 513 | } | 515 | } |
| 514 | 516 | ||
| 515 | static int | 517 | static int |
| 516 | smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_hdr *buf, | 518 | smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr, |
| 517 | struct mid_q_entry **mid) | 519 | struct mid_q_entry **mid) |
| 518 | { | 520 | { |
| 519 | if (ses->server->tcpStatus == CifsExiting) | 521 | if (ses->server->tcpStatus == CifsExiting) |
| @@ -525,19 +527,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_hdr *buf, | |||
| 525 | } | 527 | } |
| 526 | 528 | ||
| 527 | if (ses->status == CifsNew) { | 529 | if (ses->status == CifsNew) { |
| 528 | if ((buf->Command != SMB2_SESSION_SETUP) && | 530 | if ((shdr->Command != SMB2_SESSION_SETUP) && |
| 529 | (buf->Command != SMB2_NEGOTIATE)) | 531 | (shdr->Command != SMB2_NEGOTIATE)) |
| 530 | return -EAGAIN; | 532 | return -EAGAIN; |
| 531 | /* else ok - we are setting up session */ | 533 | /* else ok - we are setting up session */ |
| 532 | } | 534 | } |
| 533 | 535 | ||
| 534 | if (ses->status == CifsExiting) { | 536 | if (ses->status == CifsExiting) { |
| 535 | if (buf->Command != SMB2_LOGOFF) | 537 | if (shdr->Command != SMB2_LOGOFF) |
| 536 | return -EAGAIN; | 538 | return -EAGAIN; |
| 537 | /* else ok - we are shutting down the session */ | 539 | /* else ok - we are shutting down the session */ |
| 538 | } | 540 | } |
| 539 | 541 | ||
| 540 | *mid = smb2_mid_entry_alloc(buf, ses->server); | 542 | *mid = smb2_mid_entry_alloc(shdr, ses->server); |
| 541 | if (*mid == NULL) | 543 | if (*mid == NULL) |
| 542 | return -ENOMEM; | 544 | return -ENOMEM; |
| 543 | spin_lock(&GlobalMid_Lock); | 545 | spin_lock(&GlobalMid_Lock); |
| @@ -551,16 +553,18 @@ smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, | |||
| 551 | bool log_error) | 553 | bool log_error) |
| 552 | { | 554 | { |
| 553 | unsigned int len = get_rfc1002_length(mid->resp_buf); | 555 | unsigned int len = get_rfc1002_length(mid->resp_buf); |
| 554 | struct kvec iov; | 556 | struct kvec iov[2]; |
| 555 | struct smb_rqst rqst = { .rq_iov = &iov, | 557 | struct smb_rqst rqst = { .rq_iov = iov, |
| 556 | .rq_nvec = 1 }; | 558 | .rq_nvec = 2 }; |
| 557 | 559 | ||
| 558 | iov.iov_base = (char *)mid->resp_buf; | 560 | iov[0].iov_base = (char *)mid->resp_buf; |
| 559 | iov.iov_len = get_rfc1002_length(mid->resp_buf) + 4; | 561 | iov[0].iov_len = 4; |
| 562 | iov[1].iov_base = (char *)mid->resp_buf + 4; | ||
| 563 | iov[1].iov_len = len; | ||
| 560 | 564 | ||
| 561 | dump_smb(mid->resp_buf, min_t(u32, 80, len)); | 565 | dump_smb(mid->resp_buf, min_t(u32, 80, len)); |
| 562 | /* convert the length into a more usable form */ | 566 | /* convert the length into a more usable form */ |
| 563 | if (len > 24 && server->sign) { | 567 | if (len > 24 && server->sign && !mid->decrypted) { |
| 564 | int rc; | 568 | int rc; |
| 565 | 569 | ||
| 566 | rc = smb2_verify_signature(&rqst, server); | 570 | rc = smb2_verify_signature(&rqst, server); |
| @@ -576,12 +580,13 @@ struct mid_q_entry * | |||
| 576 | smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst) | 580 | smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst) |
| 577 | { | 581 | { |
| 578 | int rc; | 582 | int rc; |
| 579 | struct smb2_hdr *hdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; | 583 | struct smb2_sync_hdr *shdr = |
| 584 | (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base; | ||
| 580 | struct mid_q_entry *mid; | 585 | struct mid_q_entry *mid; |
| 581 | 586 | ||
| 582 | smb2_seq_num_into_buf(ses->server, hdr); | 587 | smb2_seq_num_into_buf(ses->server, shdr); |
| 583 | 588 | ||
| 584 | rc = smb2_get_mid_entry(ses, hdr, &mid); | 589 | rc = smb2_get_mid_entry(ses, shdr, &mid); |
| 585 | if (rc) | 590 | if (rc) |
| 586 | return ERR_PTR(rc); | 591 | return ERR_PTR(rc); |
| 587 | rc = smb2_sign_rqst(rqst, ses->server); | 592 | rc = smb2_sign_rqst(rqst, ses->server); |
| @@ -596,12 +601,13 @@ struct mid_q_entry * | |||
| 596 | smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) | 601 | smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
| 597 | { | 602 | { |
| 598 | int rc; | 603 | int rc; |
| 599 | struct smb2_hdr *hdr = (struct smb2_hdr *)rqst->rq_iov[0].iov_base; | 604 | struct smb2_sync_hdr *shdr = |
| 605 | (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base; | ||
| 600 | struct mid_q_entry *mid; | 606 | struct mid_q_entry *mid; |
| 601 | 607 | ||
| 602 | smb2_seq_num_into_buf(server, hdr); | 608 | smb2_seq_num_into_buf(server, shdr); |
| 603 | 609 | ||
| 604 | mid = smb2_mid_entry_alloc(hdr, server); | 610 | mid = smb2_mid_entry_alloc(shdr, server); |
| 605 | if (mid == NULL) | 611 | if (mid == NULL) |
| 606 | return ERR_PTR(-ENOMEM); | 612 | return ERR_PTR(-ENOMEM); |
| 607 | 613 | ||
| @@ -613,3 +619,33 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
| 613 | 619 | ||
| 614 | return mid; | 620 | return mid; |
| 615 | } | 621 | } |
| 622 | |||
| 623 | int | ||
| 624 | smb3_crypto_aead_allocate(struct TCP_Server_Info *server) | ||
| 625 | { | ||
| 626 | struct crypto_aead *tfm; | ||
| 627 | |||
| 628 | if (!server->secmech.ccmaesencrypt) { | ||
| 629 | tfm = crypto_alloc_aead("ccm(aes)", 0, 0); | ||
| 630 | if (IS_ERR(tfm)) { | ||
| 631 | cifs_dbg(VFS, "%s: Failed to alloc encrypt aead\n", | ||
| 632 | __func__); | ||
| 633 | return PTR_ERR(tfm); | ||
| 634 | } | ||
| 635 | server->secmech.ccmaesencrypt = tfm; | ||
| 636 | } | ||
| 637 | |||
| 638 | if (!server->secmech.ccmaesdecrypt) { | ||
| 639 | tfm = crypto_alloc_aead("ccm(aes)", 0, 0); | ||
| 640 | if (IS_ERR(tfm)) { | ||
| 641 | crypto_free_aead(server->secmech.ccmaesencrypt); | ||
| 642 | server->secmech.ccmaesencrypt = NULL; | ||
| 643 | cifs_dbg(VFS, "%s: Failed to alloc decrypt aead\n", | ||
| 644 | __func__); | ||
| 645 | return PTR_ERR(tfm); | ||
| 646 | } | ||
| 647 | server->secmech.ccmaesdecrypt = tfm; | ||
| 648 | } | ||
| 649 | |||
| 650 | return 0; | ||
| 651 | } | ||
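smb3_crypto_aead_allocate() above only allocates the ccm(aes) transforms; the encryption path added elsewhere in this series keys them and sets the tag size before use. A hedged kernel-context sketch of that follow-up step — the key value and the 16-byte tag length here are placeholders, not taken from the patch:

	#include <crypto/aead.h>
	#include <linux/err.h>

	static int ccm_aes_setup_sketch(void)
	{
		struct crypto_aead *tfm;
		static const u8 key[16];		/* placeholder 128-bit session key */
		int rc;

		tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		rc = crypto_aead_setkey(tfm, key, sizeof(key));
		if (!rc)
			rc = crypto_aead_setauthsize(tfm, 16);	/* SMB3 transform carries a 16-byte signature */

		crypto_free_aead(tfm);
		return rc;
	}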
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fbb84c08e3cd..526f0533cb4e 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
| @@ -221,7 +221,7 @@ rqst_len(struct smb_rqst *rqst) | |||
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static int | 223 | static int |
| 224 | smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | 224 | __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
| 225 | { | 225 | { |
| 226 | int rc; | 226 | int rc; |
| 227 | struct kvec *iov = rqst->rq_iov; | 227 | struct kvec *iov = rqst->rq_iov; |
| @@ -245,8 +245,12 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
| 245 | return -EIO; | 245 | return -EIO; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | if (n_vec < 2) | ||
| 249 | return -EIO; | ||
| 250 | |||
| 248 | cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); | 251 | cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); |
| 249 | dump_smb(iov[0].iov_base, iov[0].iov_len); | 252 | dump_smb(iov[0].iov_base, iov[0].iov_len); |
| 253 | dump_smb(iov[1].iov_base, iov[1].iov_len); | ||
| 250 | 254 | ||
| 251 | /* cork the socket */ | 255 | /* cork the socket */ |
| 252 | kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK, | 256 | kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK, |
| @@ -309,24 +313,43 @@ uncork: | |||
| 309 | } | 313 | } |
| 310 | 314 | ||
| 311 | static int | 315 | static int |
| 312 | smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) | 316 | smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags) |
| 313 | { | 317 | { |
| 314 | struct smb_rqst rqst = { .rq_iov = iov, | 318 | struct smb_rqst cur_rqst; |
| 315 | .rq_nvec = n_vec }; | 319 | int rc; |
| 320 | |||
| 321 | if (!(flags & CIFS_TRANSFORM_REQ)) | ||
| 322 | return __smb_send_rqst(server, rqst); | ||
| 323 | |||
| 324 | if (!server->ops->init_transform_rq || | ||
| 325 | !server->ops->free_transform_rq) { | ||
| 326 | cifs_dbg(VFS, "Encryption requested but transform callbacks are missed\n"); | ||
| 327 | return -EIO; | ||
| 328 | } | ||
| 329 | |||
| 330 | rc = server->ops->init_transform_rq(server, &cur_rqst, rqst); | ||
| 331 | if (rc) | ||
| 332 | return rc; | ||
| 316 | 333 | ||
| 317 | return smb_send_rqst(server, &rqst); | 334 | rc = __smb_send_rqst(server, &cur_rqst); |
| 335 | server->ops->free_transform_rq(&cur_rqst); | ||
| 336 | return rc; | ||
| 318 | } | 337 | } |
| 319 | 338 | ||
| 320 | int | 339 | int |
| 321 | smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, | 340 | smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, |
| 322 | unsigned int smb_buf_length) | 341 | unsigned int smb_buf_length) |
| 323 | { | 342 | { |
| 324 | struct kvec iov; | 343 | struct kvec iov[2]; |
| 344 | struct smb_rqst rqst = { .rq_iov = iov, | ||
| 345 | .rq_nvec = 2 }; | ||
| 325 | 346 | ||
| 326 | iov.iov_base = smb_buffer; | 347 | iov[0].iov_base = smb_buffer; |
| 327 | iov.iov_len = smb_buf_length + 4; | 348 | iov[0].iov_len = 4; |
| 349 | iov[1].iov_base = (char *)smb_buffer + 4; | ||
| 350 | iov[1].iov_len = smb_buf_length; | ||
| 328 | 351 | ||
| 329 | return smb_sendv(server, &iov, 1); | 352 | return __smb_send_rqst(server, &rqst); |
| 330 | } | 353 | } |
| 331 | 354 | ||
| 332 | static int | 355 | static int |
| @@ -454,6 +477,10 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
| 454 | struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; | 477 | struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
| 455 | struct mid_q_entry *mid; | 478 | struct mid_q_entry *mid; |
| 456 | 479 | ||
| 480 | if (rqst->rq_iov[0].iov_len != 4 || | ||
| 481 | rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) | ||
| 482 | return ERR_PTR(-EIO); | ||
| 483 | |||
| 457 | /* enable signing if server requires it */ | 484 | /* enable signing if server requires it */ |
| 458 | if (server->sign) | 485 | if (server->sign) |
| 459 | hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 486 | hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
| @@ -478,7 +505,7 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
| 478 | int | 505 | int |
| 479 | cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, | 506 | cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, |
| 480 | mid_receive_t *receive, mid_callback_t *callback, | 507 | mid_receive_t *receive, mid_callback_t *callback, |
| 481 | void *cbdata, const int flags) | 508 | mid_handle_t *handle, void *cbdata, const int flags) |
| 482 | { | 509 | { |
| 483 | int rc, timeout, optype; | 510 | int rc, timeout, optype; |
| 484 | struct mid_q_entry *mid; | 511 | struct mid_q_entry *mid; |
| @@ -505,6 +532,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, | |||
| 505 | mid->receive = receive; | 532 | mid->receive = receive; |
| 506 | mid->callback = callback; | 533 | mid->callback = callback; |
| 507 | mid->callback_data = cbdata; | 534 | mid->callback_data = cbdata; |
| 535 | mid->handle = handle; | ||
| 508 | mid->mid_state = MID_REQUEST_SUBMITTED; | 536 | mid->mid_state = MID_REQUEST_SUBMITTED; |
| 509 | 537 | ||
| 510 | /* put it on the pending_mid_q */ | 538 | /* put it on the pending_mid_q */ |
| @@ -514,7 +542,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, | |||
| 514 | 542 | ||
| 515 | 543 | ||
| 516 | cifs_in_send_inc(server); | 544 | cifs_in_send_inc(server); |
| 517 | rc = smb_send_rqst(server, rqst); | 545 | rc = smb_send_rqst(server, rqst, flags); |
| 518 | cifs_in_send_dec(server); | 546 | cifs_in_send_dec(server); |
| 519 | cifs_save_when_sent(mid); | 547 | cifs_save_when_sent(mid); |
| 520 | 548 | ||
| @@ -547,12 +575,13 @@ SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, | |||
| 547 | { | 575 | { |
| 548 | int rc; | 576 | int rc; |
| 549 | struct kvec iov[1]; | 577 | struct kvec iov[1]; |
| 578 | struct kvec rsp_iov; | ||
| 550 | int resp_buf_type; | 579 | int resp_buf_type; |
| 551 | 580 | ||
| 552 | iov[0].iov_base = in_buf; | 581 | iov[0].iov_base = in_buf; |
| 553 | iov[0].iov_len = get_rfc1002_length(in_buf) + 4; | 582 | iov[0].iov_len = get_rfc1002_length(in_buf) + 4; |
| 554 | flags |= CIFS_NO_RESP; | 583 | flags |= CIFS_NO_RESP; |
| 555 | rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); | 584 | rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); |
| 556 | cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc); | 585 | cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc); |
| 557 | 586 | ||
| 558 | return rc; | 587 | return rc; |
| @@ -595,10 +624,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | |||
| 595 | } | 624 | } |
| 596 | 625 | ||
| 597 | static inline int | 626 | static inline int |
| 598 | send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid) | 627 | send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst, |
| 628 | struct mid_q_entry *mid) | ||
| 599 | { | 629 | { |
| 600 | return server->ops->send_cancel ? | 630 | return server->ops->send_cancel ? |
| 601 | server->ops->send_cancel(server, buf, mid) : 0; | 631 | server->ops->send_cancel(server, rqst, mid) : 0; |
| 602 | } | 632 | } |
| 603 | 633 | ||
| 604 | int | 634 | int |
| @@ -611,13 +641,15 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, | |||
| 611 | 641 | ||
| 612 | /* convert the length into a more usable form */ | 642 | /* convert the length into a more usable form */ |
| 613 | if (server->sign) { | 643 | if (server->sign) { |
| 614 | struct kvec iov; | 644 | struct kvec iov[2]; |
| 615 | int rc = 0; | 645 | int rc = 0; |
| 616 | struct smb_rqst rqst = { .rq_iov = &iov, | 646 | struct smb_rqst rqst = { .rq_iov = iov, |
| 617 | .rq_nvec = 1 }; | 647 | .rq_nvec = 2 }; |
| 618 | 648 | ||
| 619 | iov.iov_base = mid->resp_buf; | 649 | iov[0].iov_base = mid->resp_buf; |
| 620 | iov.iov_len = len; | 650 | iov[0].iov_len = 4; |
| 651 | iov[1].iov_base = (char *)mid->resp_buf + 4; | ||
| 652 | iov[1].iov_len = len - 4; | ||
| 621 | /* FIXME: add code to kill session */ | 653 | /* FIXME: add code to kill session */ |
| 622 | rc = cifs_verify_signature(&rqst, server, | 654 | rc = cifs_verify_signature(&rqst, server, |
| 623 | mid->sequence_number); | 655 | mid->sequence_number); |
| @@ -637,6 +669,10 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst) | |||
| 637 | struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; | 669 | struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
| 638 | struct mid_q_entry *mid; | 670 | struct mid_q_entry *mid; |
| 639 | 671 | ||
| 672 | if (rqst->rq_iov[0].iov_len != 4 || | ||
| 673 | rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) | ||
| 674 | return ERR_PTR(-EIO); | ||
| 675 | |||
| 640 | rc = allocate_mid(ses, hdr, &mid); | 676 | rc = allocate_mid(ses, hdr, &mid); |
| 641 | if (rc) | 677 | if (rc) |
| 642 | return ERR_PTR(rc); | 678 | return ERR_PTR(rc); |
| @@ -649,17 +685,15 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst) | |||
| 649 | } | 685 | } |
| 650 | 686 | ||
| 651 | int | 687 | int |
| 652 | SendReceive2(const unsigned int xid, struct cifs_ses *ses, | 688 | cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, |
| 653 | struct kvec *iov, int n_vec, int *resp_buf_type /* ret */, | 689 | struct smb_rqst *rqst, int *resp_buf_type, const int flags, |
| 654 | const int flags) | 690 | struct kvec *resp_iov) |
| 655 | { | 691 | { |
| 656 | int rc = 0; | 692 | int rc = 0; |
| 657 | int timeout, optype; | 693 | int timeout, optype; |
| 658 | struct mid_q_entry *midQ; | 694 | struct mid_q_entry *midQ; |
| 659 | char *buf = iov[0].iov_base; | ||
| 660 | unsigned int credits = 1; | 695 | unsigned int credits = 1; |
| 661 | struct smb_rqst rqst = { .rq_iov = iov, | 696 | char *buf; |
| 662 | .rq_nvec = n_vec }; | ||
| 663 | 697 | ||
| 664 | timeout = flags & CIFS_TIMEOUT_MASK; | 698 | timeout = flags & CIFS_TIMEOUT_MASK; |
| 665 | optype = flags & CIFS_OP_MASK; | 699 | optype = flags & CIFS_OP_MASK; |
| @@ -667,15 +701,12 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
| 667 | *resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */ | 701 | *resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */ |
| 668 | 702 | ||
| 669 | if ((ses == NULL) || (ses->server == NULL)) { | 703 | if ((ses == NULL) || (ses->server == NULL)) { |
| 670 | cifs_small_buf_release(buf); | ||
| 671 | cifs_dbg(VFS, "Null session\n"); | 704 | cifs_dbg(VFS, "Null session\n"); |
| 672 | return -EIO; | 705 | return -EIO; |
| 673 | } | 706 | } |
| 674 | 707 | ||
| 675 | if (ses->server->tcpStatus == CifsExiting) { | 708 | if (ses->server->tcpStatus == CifsExiting) |
| 676 | cifs_small_buf_release(buf); | ||
| 677 | return -ENOENT; | 709 | return -ENOENT; |
| 678 | } | ||
| 679 | 710 | ||
| 680 | /* | 711 | /* |
| 681 | * Ensure that we do not send more than 50 overlapping requests | 712 | * Ensure that we do not send more than 50 overlapping requests |
| @@ -684,10 +715,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
| 684 | */ | 715 | */ |
| 685 | 716 | ||
| 686 | rc = wait_for_free_request(ses->server, timeout, optype); | 717 | rc = wait_for_free_request(ses->server, timeout, optype); |
| 687 | if (rc) { | 718 | if (rc) |
| 688 | cifs_small_buf_release(buf); | ||
| 689 | return rc; | 719 | return rc; |
| 690 | } | ||
| 691 | 720 | ||
| 692 | /* | 721 | /* |
| 693 | * Make sure that we sign in the same order that we send on this socket | 722 | * Make sure that we sign in the same order that we send on this socket |
| @@ -697,10 +726,9 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
| 697 | 726 | ||
| 698 | mutex_lock(&ses->server->srv_mutex); | 727 | mutex_lock(&ses->server->srv_mutex); |
| 699 | 728 | ||
| 700 | midQ = ses->server->ops->setup_request(ses, &rqst); | 729 | midQ = ses->server->ops->setup_request(ses, rqst); |
| 701 | if (IS_ERR(midQ)) { | 730 | if (IS_ERR(midQ)) { |
| 702 | mutex_unlock(&ses->server->srv_mutex); | 731 | mutex_unlock(&ses->server->srv_mutex); |
| 703 | cifs_small_buf_release(buf); | ||
| 704 | /* Update # of requests on wire to server */ | 732 | /* Update # of requests on wire to server */ |
| 705 | add_credits(ses->server, 1, optype); | 733 | add_credits(ses->server, 1, optype); |
| 706 | return PTR_ERR(midQ); | 734 | return PTR_ERR(midQ); |
| @@ -708,7 +736,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
| 708 | 736 | ||
| 709 | midQ->mid_state = MID_REQUEST_SUBMITTED; | 737 | midQ->mid_state = MID_REQUEST_SUBMITTED; |
| 710 | cifs_in_send_inc(ses->server); | 738 | cifs_in_send_inc(ses->server); |
| 711 | rc = smb_sendv(ses->server, iov, n_vec); | 739 | rc = smb_send_rqst(ses->server, rqst, flags); |
| 712 | cifs_in_send_dec(ses->server); | 740 | cifs_in_send_dec(ses->server); |
| 713 | cifs_save_when_sent(midQ); | 741 | cifs_save_when_sent(midQ); |
| 714 | 742 | ||
| @@ -716,32 +744,25 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
| 716 | ses->server->sequence_number -= 2; | 744 | ses->server->sequence_number -= 2; |
| 717 | mutex_unlock(&ses->server->srv_mutex); | 745 | mutex_unlock(&ses->server->srv_mutex); |
| 718 | 746 | ||
| 719 | if (rc < 0) { | 747 | if (rc < 0) |
| 720 | cifs_small_buf_release(buf); | ||
| 721 | goto out; | 748 | goto out; |
| 722 | } | ||
| 723 | 749 | ||
| 724 | if (timeout == CIFS_ASYNC_OP) { | 750 | if (timeout == CIFS_ASYNC_OP) |
| 725 | cifs_small_buf_release(buf); | ||
| 726 | goto out; | 751 | goto out; |
| 727 | } | ||
| 728 | 752 | ||
| 729 | rc = wait_for_response(ses->server, midQ); | 753 | rc = wait_for_response(ses->server, midQ); |
| 730 | if (rc != 0) { | 754 | if (rc != 0) { |
| 731 | send_cancel(ses->server, buf, midQ); | 755 | send_cancel(ses->server, rqst, midQ); |
| 732 | spin_lock(&GlobalMid_Lock); | 756 | spin_lock(&GlobalMid_Lock); |
| 733 | if (midQ->mid_state == MID_REQUEST_SUBMITTED) { | 757 | if (midQ->mid_state == MID_REQUEST_SUBMITTED) { |
| 734 | midQ->callback = DeleteMidQEntry; | 758 | midQ->callback = DeleteMidQEntry; |
| 735 | spin_unlock(&GlobalMid_Lock); | 759 | spin_unlock(&GlobalMid_Lock); |
| 736 | cifs_small_buf_release(buf); | ||
| 737 | add_credits(ses->server, 1, optype); | 760 | add_credits(ses->server, 1, optype); |
| 738 | return rc; | 761 | return rc; |
| 739 | } | 762 | } |
| 740 | spin_unlock(&GlobalMid_Lock); | 763 | spin_unlock(&GlobalMid_Lock); |
| 741 | } | 764 | } |
| 742 | 765 | ||
| 743 | cifs_small_buf_release(buf); | ||
| 744 | |||
| 745 | rc = cifs_sync_mid_result(midQ, ses->server); | 766 | rc = cifs_sync_mid_result(midQ, ses->server); |
| 746 | if (rc != 0) { | 767 | if (rc != 0) { |
| 747 | add_credits(ses->server, 1, optype); | 768 | add_credits(ses->server, 1, optype); |
| @@ -755,8 +776,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
| 755 | } | 776 | } |
| 756 | 777 | ||
| 757 | buf = (char *)midQ->resp_buf; | 778 | buf = (char *)midQ->resp_buf; |
| 758 | iov[0].iov_base = buf; | 779 | resp_iov->iov_base = buf; |
| 759 | iov[0].iov_len = get_rfc1002_length(buf) + 4; | 780 | resp_iov->iov_len = get_rfc1002_length(buf) + 4; |
| 760 | if (midQ->large_buf) | 781 | if (midQ->large_buf) |
| 761 | *resp_buf_type = CIFS_LARGE_BUFFER; | 782 | *resp_buf_type = CIFS_LARGE_BUFFER; |
| 762 | else | 783 | else |
| @@ -778,12 +799,45 @@ out: | |||
| 778 | } | 799 | } |
| 779 | 800 | ||
| 780 | int | 801 | int |
| 802 | SendReceive2(const unsigned int xid, struct cifs_ses *ses, | ||
| 803 | struct kvec *iov, int n_vec, int *resp_buf_type /* ret */, | ||
| 804 | const int flags, struct kvec *resp_iov) | ||
| 805 | { | ||
| 806 | struct smb_rqst rqst; | ||
| 807 | struct kvec *new_iov; | ||
| 808 | int rc; | ||
| 809 | |||
| 810 | new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL); | ||
| 811 | if (!new_iov) | ||
| 812 | return -ENOMEM; | ||
| 813 | |||
| 814 | /* 1st iov is a RFC1001 length followed by the rest of the packet */ | ||
| 815 | memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); | ||
| 816 | |||
| 817 | new_iov[0].iov_base = new_iov[1].iov_base; | ||
| 818 | new_iov[0].iov_len = 4; | ||
| 819 | new_iov[1].iov_base += 4; | ||
| 820 | new_iov[1].iov_len -= 4; | ||
| 821 | |||
| 822 | memset(&rqst, 0, sizeof(struct smb_rqst)); | ||
| 823 | rqst.rq_iov = new_iov; | ||
| 824 | rqst.rq_nvec = n_vec + 1; | ||
| 825 | |||
| 826 | rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov); | ||
| 827 | kfree(new_iov); | ||
| 828 | return rc; | ||
| 829 | } | ||
| 830 | |||
| 831 | int | ||
| 781 | SendReceive(const unsigned int xid, struct cifs_ses *ses, | 832 | SendReceive(const unsigned int xid, struct cifs_ses *ses, |
| 782 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, | 833 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, |
| 783 | int *pbytes_returned, const int timeout) | 834 | int *pbytes_returned, const int timeout) |
| 784 | { | 835 | { |
| 785 | int rc = 0; | 836 | int rc = 0; |
| 786 | struct mid_q_entry *midQ; | 837 | struct mid_q_entry *midQ; |
| 838 | unsigned int len = be32_to_cpu(in_buf->smb_buf_length); | ||
| 839 | struct kvec iov = { .iov_base = in_buf, .iov_len = len }; | ||
| 840 | struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; | ||
| 787 | 841 | ||
| 788 | if (ses == NULL) { | 842 | if (ses == NULL) { |
| 789 | cifs_dbg(VFS, "Null smb session\n"); | 843 | cifs_dbg(VFS, "Null smb session\n"); |
| @@ -801,10 +855,9 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, | |||
| 801 | to the same server. We may make this configurable later or | 855 | to the same server. We may make this configurable later or |
| 802 | use ses->maxReq */ | 856 | use ses->maxReq */ |
| 803 | 857 | ||
| 804 | if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize + | 858 | if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
| 805 | MAX_CIFS_HDR_SIZE - 4) { | ||
| 806 | cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n", | 859 | cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n", |
| 807 | be32_to_cpu(in_buf->smb_buf_length)); | 860 | len); |
| 808 | return -EIO; | 861 | return -EIO; |
| 809 | } | 862 | } |
| 810 | 863 | ||
| @@ -835,7 +888,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, | |||
| 835 | midQ->mid_state = MID_REQUEST_SUBMITTED; | 888 | midQ->mid_state = MID_REQUEST_SUBMITTED; |
| 836 | 889 | ||
| 837 | cifs_in_send_inc(ses->server); | 890 | cifs_in_send_inc(ses->server); |
| 838 | rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); | 891 | rc = smb_send(ses->server, in_buf, len); |
| 839 | cifs_in_send_dec(ses->server); | 892 | cifs_in_send_dec(ses->server); |
| 840 | cifs_save_when_sent(midQ); | 893 | cifs_save_when_sent(midQ); |
| 841 | 894 | ||
| @@ -852,7 +905,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, | |||
| 852 | 905 | ||
| 853 | rc = wait_for_response(ses->server, midQ); | 906 | rc = wait_for_response(ses->server, midQ); |
| 854 | if (rc != 0) { | 907 | if (rc != 0) { |
| 855 | send_cancel(ses->server, in_buf, midQ); | 908 | send_cancel(ses->server, &rqst, midQ); |
| 856 | spin_lock(&GlobalMid_Lock); | 909 | spin_lock(&GlobalMid_Lock); |
| 857 | if (midQ->mid_state == MID_REQUEST_SUBMITTED) { | 910 | if (midQ->mid_state == MID_REQUEST_SUBMITTED) { |
| 858 | /* no longer considered to be "in-flight" */ | 911 | /* no longer considered to be "in-flight" */ |
| @@ -921,6 +974,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 921 | int rstart = 0; | 974 | int rstart = 0; |
| 922 | struct mid_q_entry *midQ; | 975 | struct mid_q_entry *midQ; |
| 923 | struct cifs_ses *ses; | 976 | struct cifs_ses *ses; |
| 977 | unsigned int len = be32_to_cpu(in_buf->smb_buf_length); | ||
| 978 | struct kvec iov = { .iov_base = in_buf, .iov_len = len }; | ||
| 979 | struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; | ||
| 924 | 980 | ||
| 925 | if (tcon == NULL || tcon->ses == NULL) { | 981 | if (tcon == NULL || tcon->ses == NULL) { |
| 926 | cifs_dbg(VFS, "Null smb session\n"); | 982 | cifs_dbg(VFS, "Null smb session\n"); |
| @@ -940,10 +996,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 940 | to the same server. We may make this configurable later or | 996 | to the same server. We may make this configurable later or |
| 941 | use ses->maxReq */ | 997 | use ses->maxReq */ |
| 942 | 998 | ||
| 943 | if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize + | 999 | if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { |
| 944 | MAX_CIFS_HDR_SIZE - 4) { | ||
| 945 | cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n", | 1000 | cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n", |
| 946 | be32_to_cpu(in_buf->smb_buf_length)); | 1001 | len); |
| 947 | return -EIO; | 1002 | return -EIO; |
| 948 | } | 1003 | } |
| 949 | 1004 | ||
| @@ -972,7 +1027,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 972 | 1027 | ||
| 973 | midQ->mid_state = MID_REQUEST_SUBMITTED; | 1028 | midQ->mid_state = MID_REQUEST_SUBMITTED; |
| 974 | cifs_in_send_inc(ses->server); | 1029 | cifs_in_send_inc(ses->server); |
| 975 | rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); | 1030 | rc = smb_send(ses->server, in_buf, len); |
| 976 | cifs_in_send_dec(ses->server); | 1031 | cifs_in_send_dec(ses->server); |
| 977 | cifs_save_when_sent(midQ); | 1032 | cifs_save_when_sent(midQ); |
| 978 | 1033 | ||
| @@ -1001,7 +1056,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1001 | if (in_buf->Command == SMB_COM_TRANSACTION2) { | 1056 | if (in_buf->Command == SMB_COM_TRANSACTION2) { |
| 1002 | /* POSIX lock. We send a NT_CANCEL SMB to cause the | 1057 | /* POSIX lock. We send a NT_CANCEL SMB to cause the |
| 1003 | blocking lock to return. */ | 1058 | blocking lock to return. */ |
| 1004 | rc = send_cancel(ses->server, in_buf, midQ); | 1059 | rc = send_cancel(ses->server, &rqst, midQ); |
| 1005 | if (rc) { | 1060 | if (rc) { |
| 1006 | cifs_delete_mid(midQ); | 1061 | cifs_delete_mid(midQ); |
| 1007 | return rc; | 1062 | return rc; |
| @@ -1022,7 +1077,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, | |||
| 1022 | 1077 | ||
| 1023 | rc = wait_for_response(ses->server, midQ); | 1078 | rc = wait_for_response(ses->server, midQ); |
| 1024 | if (rc) { | 1079 | if (rc) { |
| 1025 | send_cancel(ses->server, in_buf, midQ); | 1080 | send_cancel(ses->server, &rqst, midQ); |
| 1026 | spin_lock(&GlobalMid_Lock); | 1081 | spin_lock(&GlobalMid_Lock); |
| 1027 | if (midQ->mid_state == MID_REQUEST_SUBMITTED) { | 1082 | if (midQ->mid_state == MID_REQUEST_SUBMITTED) { |
| 1028 | /* no longer considered to be "in-flight" */ | 1083 | /* no longer considered to be "in-flight" */ |
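The transport.c changes above standardize on a two-vector request layout: iov[0] carries only the 4-byte RFC1001 length and iov[1] starts at the protocol header, which is what the SMB2 signing code now dereferences. A small userspace-style sketch of that split (the helper name is made up for illustration):

	#include <stdint.h>
	#include <sys/uio.h>

	static void split_rfc1001_frame(void *buf, uint32_t body_len, struct iovec iov[2])
	{
		iov[0].iov_base = buf;			/* 4-byte RFC1001 length field */
		iov[0].iov_len  = 4;
		iov[1].iov_base = (char *)buf + 4;	/* SMB header and payload */
		iov[1].iov_len  = body_len;
	}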
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c index 4d24d17bcfc1..504b3c3539dc 100644 --- a/fs/compat_binfmt_elf.c +++ b/fs/compat_binfmt_elf.c | |||
| @@ -51,22 +51,8 @@ | |||
| 51 | #define elf_prstatus compat_elf_prstatus | 51 | #define elf_prstatus compat_elf_prstatus |
| 52 | #define elf_prpsinfo compat_elf_prpsinfo | 52 | #define elf_prpsinfo compat_elf_prpsinfo |
| 53 | 53 | ||
| 54 | /* | 54 | #undef ns_to_timeval |
| 55 | * Compat version of cputime_to_compat_timeval, perhaps this | 55 | #define ns_to_timeval ns_to_compat_timeval |
| 56 | * should be an inline in <linux/compat.h>. | ||
| 57 | */ | ||
| 58 | static void cputime_to_compat_timeval(const cputime_t cputime, | ||
| 59 | struct compat_timeval *value) | ||
| 60 | { | ||
| 61 | struct timeval tv; | ||
| 62 | cputime_to_timeval(cputime, &tv); | ||
| 63 | value->tv_sec = tv.tv_sec; | ||
| 64 | value->tv_usec = tv.tv_usec; | ||
| 65 | } | ||
| 66 | |||
| 67 | #undef cputime_to_timeval | ||
| 68 | #define cputime_to_timeval cputime_to_compat_timeval | ||
| 69 | |||
| 70 | 56 | ||
| 71 | /* | 57 | /* |
| 72 | * To use this file, asm/elf.h must define compat_elf_check_arch. | 58 | * To use this file, asm/elf.h must define compat_elf_check_arch. |
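The compat ELF core-dump code above now maps ns_to_timeval onto ns_to_compat_timeval instead of carrying its own cputime conversion. Roughly, that conversion reduces to splitting a nanosecond count into 32-bit seconds and microseconds; a simplified sketch that ignores the negative-value rounding the kernel helper handles:

	#include <stdint.h>

	struct compat_timeval_sketch {
		int32_t tv_sec;
		int32_t tv_usec;
	};

	static struct compat_timeval_sketch ns_to_compat_timeval_sketch(int64_t ns)
	{
		struct compat_timeval_sketch tv;

		tv.tv_sec  = (int32_t)(ns / 1000000000LL);
		tv.tv_usec = (int32_t)((ns % 1000000000LL) / 1000LL);
		return tv;
	}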
diff --git a/fs/coredump.c b/fs/coredump.c index e525b6017cdf..ae6b05629ca1 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
| @@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align) | |||
| 833 | return mod ? dump_skip(cprm, align - mod) : 1; | 833 | return mod ? dump_skip(cprm, align - mod) : 1; |
| 834 | } | 834 | } |
| 835 | EXPORT_SYMBOL(dump_align); | 835 | EXPORT_SYMBOL(dump_align); |
| 836 | |||
| 837 | /* | ||
| 838 | * Ensures that file size is big enough to contain the current file | ||
| 839 | * position. This prevents gdb from complaining about a truncated file | ||
| 840 | * if the last "write" to the file was dump_skip. | ||
| 841 | */ | ||
| 842 | void dump_truncate(struct coredump_params *cprm) | ||
| 843 | { | ||
| 844 | struct file *file = cprm->file; | ||
| 845 | loff_t offset; | ||
| 846 | |||
| 847 | if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | ||
| 848 | offset = file->f_op->llseek(file, 0, SEEK_CUR); | ||
| 849 | if (i_size_read(file->f_mapping->host) < offset) | ||
| 850 | do_truncate(file->f_path.dentry, offset, 0, file); | ||
| 851 | } | ||
| 852 | } | ||
| 853 | EXPORT_SYMBOL(dump_truncate); | ||
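dump_truncate() above grows the core file so its size covers the current position when the dump ended with dump_skip(), keeping gdb from seeing a short file. A userspace analogue of the same idea, for illustration only:

	#include <sys/stat.h>
	#include <unistd.h>

	static void extend_to_current_offset(int fd)
	{
		struct stat st;
		off_t pos = lseek(fd, 0, SEEK_CUR);

		if (pos >= 0 && fstat(fd, &st) == 0 && st.st_size < pos)
			ftruncate(fd, pos);	/* grow the file so the skipped tail reads back as zeroes */
	}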
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index f514978f6688..08b46e6e3995 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | config FS_ENCRYPTION | 1 | config FS_ENCRYPTION |
| 2 | tristate "FS Encryption (Per-file encryption)" | 2 | tristate "FS Encryption (Per-file encryption)" |
| 3 | depends on BLOCK | ||
| 4 | select CRYPTO | 3 | select CRYPTO |
| 5 | select CRYPTO_AES | 4 | select CRYPTO_AES |
| 6 | select CRYPTO_CBC | 5 | select CRYPTO_CBC |
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index f17684c48739..9f6607f17b53 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o | 1 | obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o |
| 2 | 2 | ||
| 3 | fscrypto-y := crypto.o fname.o policy.o keyinfo.o | 3 | fscrypto-y := crypto.o fname.o policy.o keyinfo.o |
| 4 | fscrypto-$(CONFIG_BLOCK) += bio.o | ||
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c new file mode 100644 index 000000000000..a409a84f1bca --- /dev/null +++ b/fs/crypto/bio.c | |||
| @@ -0,0 +1,145 @@ | |||
| 1 | /* | ||
| 2 | * This contains encryption functions for per-file encryption. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015, Google, Inc. | ||
| 5 | * Copyright (C) 2015, Motorola Mobility | ||
| 6 | * | ||
| 7 | * Written by Michael Halcrow, 2014. | ||
| 8 | * | ||
| 9 | * Filename encryption additions | ||
| 10 | * Uday Savagaonkar, 2014 | ||
| 11 | * Encryption policy handling additions | ||
| 12 | * Ildar Muslukhov, 2014 | ||
| 13 | * Add fscrypt_pullback_bio_page() | ||
| 14 | * Jaegeuk Kim, 2015. | ||
| 15 | * | ||
| 16 | * This has not yet undergone a rigorous security audit. | ||
| 17 | * | ||
| 18 | * The usage of AES-XTS should conform to recommendations in NIST | ||
| 19 | * Special Publication 800-38E and IEEE P1619/D16. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/pagemap.h> | ||
| 23 | #include <linux/module.h> | ||
| 24 | #include <linux/bio.h> | ||
| 25 | #include <linux/namei.h> | ||
| 26 | #include "fscrypt_private.h" | ||
| 27 | |||
| 28 | /* | ||
| 29 | * Call fscrypt_decrypt_page on every single page, reusing the encryption | ||
| 30 | * context. | ||
| 31 | */ | ||
| 32 | static void completion_pages(struct work_struct *work) | ||
| 33 | { | ||
| 34 | struct fscrypt_ctx *ctx = | ||
| 35 | container_of(work, struct fscrypt_ctx, r.work); | ||
| 36 | struct bio *bio = ctx->r.bio; | ||
| 37 | struct bio_vec *bv; | ||
| 38 | int i; | ||
| 39 | |||
| 40 | bio_for_each_segment_all(bv, bio, i) { | ||
| 41 | struct page *page = bv->bv_page; | ||
| 42 | int ret = fscrypt_decrypt_page(page->mapping->host, page, | ||
| 43 | PAGE_SIZE, 0, page->index); | ||
| 44 | |||
| 45 | if (ret) { | ||
| 46 | WARN_ON_ONCE(1); | ||
| 47 | SetPageError(page); | ||
| 48 | } else { | ||
| 49 | SetPageUptodate(page); | ||
| 50 | } | ||
| 51 | unlock_page(page); | ||
| 52 | } | ||
| 53 | fscrypt_release_ctx(ctx); | ||
| 54 | bio_put(bio); | ||
| 55 | } | ||
| 56 | |||
| 57 | void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio) | ||
| 58 | { | ||
| 59 | INIT_WORK(&ctx->r.work, completion_pages); | ||
| 60 | ctx->r.bio = bio; | ||
| 61 | queue_work(fscrypt_read_workqueue, &ctx->r.work); | ||
| 62 | } | ||
| 63 | EXPORT_SYMBOL(fscrypt_decrypt_bio_pages); | ||
| 64 | |||
| 65 | void fscrypt_pullback_bio_page(struct page **page, bool restore) | ||
| 66 | { | ||
| 67 | struct fscrypt_ctx *ctx; | ||
| 68 | struct page *bounce_page; | ||
| 69 | |||
| 70 | /* The bounce data pages are unmapped. */ | ||
| 71 | if ((*page)->mapping) | ||
| 72 | return; | ||
| 73 | |||
| 74 | /* The bounce data page is unmapped. */ | ||
| 75 | bounce_page = *page; | ||
| 76 | ctx = (struct fscrypt_ctx *)page_private(bounce_page); | ||
| 77 | |||
| 78 | /* restore control page */ | ||
| 79 | *page = ctx->w.control_page; | ||
| 80 | |||
| 81 | if (restore) | ||
| 82 | fscrypt_restore_control_page(bounce_page); | ||
| 83 | } | ||
| 84 | EXPORT_SYMBOL(fscrypt_pullback_bio_page); | ||
| 85 | |||
| 86 | int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, | ||
| 87 | sector_t pblk, unsigned int len) | ||
| 88 | { | ||
| 89 | struct fscrypt_ctx *ctx; | ||
| 90 | struct page *ciphertext_page = NULL; | ||
| 91 | struct bio *bio; | ||
| 92 | int ret, err = 0; | ||
| 93 | |||
| 94 | BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); | ||
| 95 | |||
| 96 | ctx = fscrypt_get_ctx(inode, GFP_NOFS); | ||
| 97 | if (IS_ERR(ctx)) | ||
| 98 | return PTR_ERR(ctx); | ||
| 99 | |||
| 100 | ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT); | ||
| 101 | if (IS_ERR(ciphertext_page)) { | ||
| 102 | err = PTR_ERR(ciphertext_page); | ||
| 103 | goto errout; | ||
| 104 | } | ||
| 105 | |||
| 106 | while (len--) { | ||
| 107 | err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk, | ||
| 108 | ZERO_PAGE(0), ciphertext_page, | ||
| 109 | PAGE_SIZE, 0, GFP_NOFS); | ||
| 110 | if (err) | ||
| 111 | goto errout; | ||
| 112 | |||
| 113 | bio = bio_alloc(GFP_NOWAIT, 1); | ||
| 114 | if (!bio) { | ||
| 115 | err = -ENOMEM; | ||
| 116 | goto errout; | ||
| 117 | } | ||
| 118 | bio->bi_bdev = inode->i_sb->s_bdev; | ||
| 119 | bio->bi_iter.bi_sector = | ||
| 120 | pblk << (inode->i_sb->s_blocksize_bits - 9); | ||
| 121 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
| 122 | ret = bio_add_page(bio, ciphertext_page, | ||
| 123 | inode->i_sb->s_blocksize, 0); | ||
| 124 | if (ret != inode->i_sb->s_blocksize) { | ||
| 125 | /* should never happen! */ | ||
| 126 | WARN_ON(1); | ||
| 127 | bio_put(bio); | ||
| 128 | err = -EIO; | ||
| 129 | goto errout; | ||
| 130 | } | ||
| 131 | err = submit_bio_wait(bio); | ||
| 132 | if ((err == 0) && bio->bi_error) | ||
| 133 | err = -EIO; | ||
| 134 | bio_put(bio); | ||
| 135 | if (err) | ||
| 136 | goto errout; | ||
| 137 | lblk++; | ||
| 138 | pblk++; | ||
| 139 | } | ||
| 140 | err = 0; | ||
| 141 | errout: | ||
| 142 | fscrypt_release_ctx(ctx); | ||
| 143 | return err; | ||
| 144 | } | ||
| 145 | EXPORT_SYMBOL(fscrypt_zeroout_range); | ||
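The deferral above (queueing completion_pages() on fscrypt_read_workqueue) exists because decryption must not run in bio completion context. Below is a minimal sketch of a caller, assuming a filesystem that stashes its fscrypt_ctx in bi_private at submit time; my_read_end_io() and my_finish_read() are invented names and are not part of this patch.

/* Illustrative sketch only. */
static void my_read_end_io(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;	/* set at submit time */

	if (ctx && !bio->bi_error) {
		/* completion_pages() will unlock the pages and put the bio. */
		fscrypt_decrypt_bio_pages(ctx, bio);
		return;
	}
	if (ctx)
		fscrypt_release_ctx(ctx);
	my_finish_read(bio);		/* hypothetical plaintext/error path */
}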
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index ac8e4f6a3773..02a7a9286449 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
| 26 | #include <linux/ratelimit.h> | 26 | #include <linux/ratelimit.h> |
| 27 | #include <linux/bio.h> | ||
| 28 | #include <linux/dcache.h> | 27 | #include <linux/dcache.h> |
| 29 | #include <linux/namei.h> | 28 | #include <linux/namei.h> |
| 30 | #include "fscrypt_private.h" | 29 | #include "fscrypt_private.h" |
| @@ -44,7 +43,7 @@ static mempool_t *fscrypt_bounce_page_pool = NULL; | |||
| 44 | static LIST_HEAD(fscrypt_free_ctxs); | 43 | static LIST_HEAD(fscrypt_free_ctxs); |
| 45 | static DEFINE_SPINLOCK(fscrypt_ctx_lock); | 44 | static DEFINE_SPINLOCK(fscrypt_ctx_lock); |
| 46 | 45 | ||
| 47 | static struct workqueue_struct *fscrypt_read_workqueue; | 46 | struct workqueue_struct *fscrypt_read_workqueue; |
| 48 | static DEFINE_MUTEX(fscrypt_init_mutex); | 47 | static DEFINE_MUTEX(fscrypt_init_mutex); |
| 49 | 48 | ||
| 50 | static struct kmem_cache *fscrypt_ctx_cachep; | 49 | static struct kmem_cache *fscrypt_ctx_cachep; |
| @@ -141,16 +140,10 @@ static void page_crypt_complete(struct crypto_async_request *req, int res) | |||
| 141 | complete(&ecr->completion); | 140 | complete(&ecr->completion); |
| 142 | } | 141 | } |
| 143 | 142 | ||
| 144 | typedef enum { | 143 | int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, |
| 145 | FS_DECRYPT = 0, | 144 | u64 lblk_num, struct page *src_page, |
| 146 | FS_ENCRYPT, | 145 | struct page *dest_page, unsigned int len, |
| 147 | } fscrypt_direction_t; | 146 | unsigned int offs, gfp_t gfp_flags) |
| 148 | |||
| 149 | static int do_page_crypto(const struct inode *inode, | ||
| 150 | fscrypt_direction_t rw, u64 lblk_num, | ||
| 151 | struct page *src_page, struct page *dest_page, | ||
| 152 | unsigned int len, unsigned int offs, | ||
| 153 | gfp_t gfp_flags) | ||
| 154 | { | 147 | { |
| 155 | struct { | 148 | struct { |
| 156 | __le64 index; | 149 | __le64 index; |
| @@ -205,7 +198,8 @@ static int do_page_crypto(const struct inode *inode, | |||
| 205 | return 0; | 198 | return 0; |
| 206 | } | 199 | } |
| 207 | 200 | ||
| 208 | static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags) | 201 | struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, |
| 202 | gfp_t gfp_flags) | ||
| 209 | { | 203 | { |
| 210 | ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); | 204 | ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); |
| 211 | if (ctx->w.bounce_page == NULL) | 205 | if (ctx->w.bounce_page == NULL) |
| @@ -260,9 +254,9 @@ struct page *fscrypt_encrypt_page(const struct inode *inode, | |||
| 260 | 254 | ||
| 261 | if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) { | 255 | if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) { |
| 262 | /* with inplace-encryption we just encrypt the page */ | 256 | /* with inplace-encryption we just encrypt the page */ |
| 263 | err = do_page_crypto(inode, FS_ENCRYPT, lblk_num, | 257 | err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page, |
| 264 | page, ciphertext_page, | 258 | ciphertext_page, len, offs, |
| 265 | len, offs, gfp_flags); | 259 | gfp_flags); |
| 266 | if (err) | 260 | if (err) |
| 267 | return ERR_PTR(err); | 261 | return ERR_PTR(err); |
| 268 | 262 | ||
| @@ -276,14 +270,14 @@ struct page *fscrypt_encrypt_page(const struct inode *inode, | |||
| 276 | return (struct page *)ctx; | 270 | return (struct page *)ctx; |
| 277 | 271 | ||
| 278 | /* The encryption operation will require a bounce page. */ | 272 | /* The encryption operation will require a bounce page. */ |
| 279 | ciphertext_page = alloc_bounce_page(ctx, gfp_flags); | 273 | ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags); |
| 280 | if (IS_ERR(ciphertext_page)) | 274 | if (IS_ERR(ciphertext_page)) |
| 281 | goto errout; | 275 | goto errout; |
| 282 | 276 | ||
| 283 | ctx->w.control_page = page; | 277 | ctx->w.control_page = page; |
| 284 | err = do_page_crypto(inode, FS_ENCRYPT, lblk_num, | 278 | err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, |
| 285 | page, ciphertext_page, | 279 | page, ciphertext_page, len, offs, |
| 286 | len, offs, gfp_flags); | 280 | gfp_flags); |
| 287 | if (err) { | 281 | if (err) { |
| 288 | ciphertext_page = ERR_PTR(err); | 282 | ciphertext_page = ERR_PTR(err); |
| 289 | goto errout; | 283 | goto errout; |
| @@ -320,72 +314,11 @@ int fscrypt_decrypt_page(const struct inode *inode, struct page *page, | |||
| 320 | if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)) | 314 | if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)) |
| 321 | BUG_ON(!PageLocked(page)); | 315 | BUG_ON(!PageLocked(page)); |
| 322 | 316 | ||
| 323 | return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len, | 317 | return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, |
| 324 | offs, GFP_NOFS); | 318 | len, offs, GFP_NOFS); |
| 325 | } | 319 | } |
| 326 | EXPORT_SYMBOL(fscrypt_decrypt_page); | 320 | EXPORT_SYMBOL(fscrypt_decrypt_page); |
| 327 | 321 | ||
| 328 | int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, | ||
| 329 | sector_t pblk, unsigned int len) | ||
| 330 | { | ||
| 331 | struct fscrypt_ctx *ctx; | ||
| 332 | struct page *ciphertext_page = NULL; | ||
| 333 | struct bio *bio; | ||
| 334 | int ret, err = 0; | ||
| 335 | |||
| 336 | BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); | ||
| 337 | |||
| 338 | ctx = fscrypt_get_ctx(inode, GFP_NOFS); | ||
| 339 | if (IS_ERR(ctx)) | ||
| 340 | return PTR_ERR(ctx); | ||
| 341 | |||
| 342 | ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT); | ||
| 343 | if (IS_ERR(ciphertext_page)) { | ||
| 344 | err = PTR_ERR(ciphertext_page); | ||
| 345 | goto errout; | ||
| 346 | } | ||
| 347 | |||
| 348 | while (len--) { | ||
| 349 | err = do_page_crypto(inode, FS_ENCRYPT, lblk, | ||
| 350 | ZERO_PAGE(0), ciphertext_page, | ||
| 351 | PAGE_SIZE, 0, GFP_NOFS); | ||
| 352 | if (err) | ||
| 353 | goto errout; | ||
| 354 | |||
| 355 | bio = bio_alloc(GFP_NOWAIT, 1); | ||
| 356 | if (!bio) { | ||
| 357 | err = -ENOMEM; | ||
| 358 | goto errout; | ||
| 359 | } | ||
| 360 | bio->bi_bdev = inode->i_sb->s_bdev; | ||
| 361 | bio->bi_iter.bi_sector = | ||
| 362 | pblk << (inode->i_sb->s_blocksize_bits - 9); | ||
| 363 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); | ||
| 364 | ret = bio_add_page(bio, ciphertext_page, | ||
| 365 | inode->i_sb->s_blocksize, 0); | ||
| 366 | if (ret != inode->i_sb->s_blocksize) { | ||
| 367 | /* should never happen! */ | ||
| 368 | WARN_ON(1); | ||
| 369 | bio_put(bio); | ||
| 370 | err = -EIO; | ||
| 371 | goto errout; | ||
| 372 | } | ||
| 373 | err = submit_bio_wait(bio); | ||
| 374 | if ((err == 0) && bio->bi_error) | ||
| 375 | err = -EIO; | ||
| 376 | bio_put(bio); | ||
| 377 | if (err) | ||
| 378 | goto errout; | ||
| 379 | lblk++; | ||
| 380 | pblk++; | ||
| 381 | } | ||
| 382 | err = 0; | ||
| 383 | errout: | ||
| 384 | fscrypt_release_ctx(ctx); | ||
| 385 | return err; | ||
| 386 | } | ||
| 387 | EXPORT_SYMBOL(fscrypt_zeroout_range); | ||
| 388 | |||
| 389 | /* | 322 | /* |
| 390 | * Validate dentries for encrypted directories to make sure we aren't | 323 | * Validate dentries for encrypted directories to make sure we aren't |
| 391 | * potentially caching stale data after a key has been added or | 324 | * potentially caching stale data after a key has been added or |
| @@ -442,64 +375,6 @@ const struct dentry_operations fscrypt_d_ops = { | |||
| 442 | }; | 375 | }; |
| 443 | EXPORT_SYMBOL(fscrypt_d_ops); | 376 | EXPORT_SYMBOL(fscrypt_d_ops); |
| 444 | 377 | ||
| 445 | /* | ||
| 446 | * Call fscrypt_decrypt_page on every single page, reusing the encryption | ||
| 447 | * context. | ||
| 448 | */ | ||
| 449 | static void completion_pages(struct work_struct *work) | ||
| 450 | { | ||
| 451 | struct fscrypt_ctx *ctx = | ||
| 452 | container_of(work, struct fscrypt_ctx, r.work); | ||
| 453 | struct bio *bio = ctx->r.bio; | ||
| 454 | struct bio_vec *bv; | ||
| 455 | int i; | ||
| 456 | |||
| 457 | bio_for_each_segment_all(bv, bio, i) { | ||
| 458 | struct page *page = bv->bv_page; | ||
| 459 | int ret = fscrypt_decrypt_page(page->mapping->host, page, | ||
| 460 | PAGE_SIZE, 0, page->index); | ||
| 461 | |||
| 462 | if (ret) { | ||
| 463 | WARN_ON_ONCE(1); | ||
| 464 | SetPageError(page); | ||
| 465 | } else { | ||
| 466 | SetPageUptodate(page); | ||
| 467 | } | ||
| 468 | unlock_page(page); | ||
| 469 | } | ||
| 470 | fscrypt_release_ctx(ctx); | ||
| 471 | bio_put(bio); | ||
| 472 | } | ||
| 473 | |||
| 474 | void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio) | ||
| 475 | { | ||
| 476 | INIT_WORK(&ctx->r.work, completion_pages); | ||
| 477 | ctx->r.bio = bio; | ||
| 478 | queue_work(fscrypt_read_workqueue, &ctx->r.work); | ||
| 479 | } | ||
| 480 | EXPORT_SYMBOL(fscrypt_decrypt_bio_pages); | ||
| 481 | |||
| 482 | void fscrypt_pullback_bio_page(struct page **page, bool restore) | ||
| 483 | { | ||
| 484 | struct fscrypt_ctx *ctx; | ||
| 485 | struct page *bounce_page; | ||
| 486 | |||
| 487 | /* The bounce data pages are unmapped. */ | ||
| 488 | if ((*page)->mapping) | ||
| 489 | return; | ||
| 490 | |||
| 491 | /* The bounce data page is unmapped. */ | ||
| 492 | bounce_page = *page; | ||
| 493 | ctx = (struct fscrypt_ctx *)page_private(bounce_page); | ||
| 494 | |||
| 495 | /* restore control page */ | ||
| 496 | *page = ctx->w.control_page; | ||
| 497 | |||
| 498 | if (restore) | ||
| 499 | fscrypt_restore_control_page(bounce_page); | ||
| 500 | } | ||
| 501 | EXPORT_SYMBOL(fscrypt_pullback_bio_page); | ||
| 502 | |||
| 503 | void fscrypt_restore_control_page(struct page *page) | 378 | void fscrypt_restore_control_page(struct page *page) |
| 504 | { | 379 | { |
| 505 | struct fscrypt_ctx *ctx; | 380 | struct fscrypt_ctx *ctx; |
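On the write side, the fscrypt_encrypt_page() hunks above return a bounce page that the filesystem submits in place of the original pagecache page, and fscrypt_restore_control_page() (or fscrypt_pullback_bio_page() from the new bio.c) undoes the substitution. The following is a hedged sketch of that pattern; my_write_encrypted_page() is an invented helper, and using page->index as the logical block number assumes block size equals PAGE_SIZE.

/* Illustrative sketch only. */
static int my_write_encrypted_page(struct inode *inode, struct page *page,
				   struct bio *bio)
{
	struct page *ciphertext_page;

	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					       page->index, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	if (bio_add_page(bio, ciphertext_page, PAGE_SIZE, 0) != PAGE_SIZE) {
		/* Give the bounce page back and leave 'page' untouched. */
		fscrypt_restore_control_page(ciphertext_page);
		return -EIO;
	}
	return 0;
}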
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 56ad9d195f18..13052b85c393 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c | |||
| @@ -332,7 +332,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode, | |||
| 332 | * in a directory. Consequently, a user space name cannot be mapped to | 332 | * in a directory. Consequently, a user space name cannot be mapped to |
| 333 | * a disk-space name | 333 | * a disk-space name |
| 334 | */ | 334 | */ |
| 335 | return -EACCES; | 335 | return -ENOKEY; |
| 336 | } | 336 | } |
| 337 | EXPORT_SYMBOL(fscrypt_fname_usr_to_disk); | 337 | EXPORT_SYMBOL(fscrypt_fname_usr_to_disk); |
| 338 | 338 | ||
| @@ -367,7 +367,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, | |||
| 367 | return 0; | 367 | return 0; |
| 368 | } | 368 | } |
| 369 | if (!lookup) | 369 | if (!lookup) |
| 370 | return -EACCES; | 370 | return -ENOKEY; |
| 371 | 371 | ||
| 372 | /* | 372 | /* |
| 373 | * We don't have the key and we are doing a lookup; decode the | 373 | * We don't have the key and we are doing a lookup; decode the |
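The -EACCES to -ENOKEY change above is visible to userspace: operations that must encrypt a name, such as creating a file in an encrypted directory whose key is not loaded, now fail with ENOKEY. A small illustrative userspace check, not taken from the patch:

/* Illustrative only. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static int create_in_encrypted_dir(int dirfd, const char *name)
{
	int fd = openat(dirfd, name, O_CREAT | O_WRONLY, 0600);

	if (fd < 0 && errno == ENOKEY)
		fprintf(stderr, "no encryption key loaded for this directory\n");
	return fd;
}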
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index aeab032d7d35..fdbb8af32eaf 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | #ifndef _FSCRYPT_PRIVATE_H | 11 | #ifndef _FSCRYPT_PRIVATE_H |
| 12 | #define _FSCRYPT_PRIVATE_H | 12 | #define _FSCRYPT_PRIVATE_H |
| 13 | 13 | ||
| 14 | #include <linux/fscrypto.h> | 14 | #include <linux/fscrypt_supp.h> |
| 15 | 15 | ||
| 16 | #define FS_FNAME_CRYPTO_DIGEST_SIZE 32 | 16 | #define FS_FNAME_CRYPTO_DIGEST_SIZE 32 |
| 17 | 17 | ||
| @@ -71,6 +71,11 @@ struct fscrypt_info { | |||
| 71 | u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; | 71 | u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; |
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | typedef enum { | ||
| 75 | FS_DECRYPT = 0, | ||
| 76 | FS_ENCRYPT, | ||
| 77 | } fscrypt_direction_t; | ||
| 78 | |||
| 74 | #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 | 79 | #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 |
| 75 | #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 | 80 | #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 |
| 76 | 81 | ||
| @@ -81,11 +86,20 @@ struct fscrypt_completion_result { | |||
| 81 | 86 | ||
| 82 | #define DECLARE_FS_COMPLETION_RESULT(ecr) \ | 87 | #define DECLARE_FS_COMPLETION_RESULT(ecr) \ |
| 83 | struct fscrypt_completion_result ecr = { \ | 88 | struct fscrypt_completion_result ecr = { \ |
| 84 | COMPLETION_INITIALIZER((ecr).completion), 0 } | 89 | COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 } |
| 85 | 90 | ||
| 86 | 91 | ||
| 87 | /* crypto.c */ | 92 | /* crypto.c */ |
| 88 | int fscrypt_initialize(unsigned int cop_flags); | 93 | extern int fscrypt_initialize(unsigned int cop_flags); |
| 94 | extern struct workqueue_struct *fscrypt_read_workqueue; | ||
| 95 | extern int fscrypt_do_page_crypto(const struct inode *inode, | ||
| 96 | fscrypt_direction_t rw, u64 lblk_num, | ||
| 97 | struct page *src_page, | ||
| 98 | struct page *dest_page, | ||
| 99 | unsigned int len, unsigned int offs, | ||
| 100 | gfp_t gfp_flags); | ||
| 101 | extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, | ||
| 102 | gfp_t gfp_flags); | ||
| 89 | 103 | ||
| 90 | /* keyinfo.c */ | 104 | /* keyinfo.c */ |
| 91 | extern int fscrypt_get_crypt_info(struct inode *); | 105 | extern int fscrypt_get_crypt_info(struct inode *); |
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 6eeea1dcba41..02eb6b9e4438 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
| @@ -77,26 +77,22 @@ out: | |||
| 77 | 77 | ||
| 78 | static int validate_user_key(struct fscrypt_info *crypt_info, | 78 | static int validate_user_key(struct fscrypt_info *crypt_info, |
| 79 | struct fscrypt_context *ctx, u8 *raw_key, | 79 | struct fscrypt_context *ctx, u8 *raw_key, |
| 80 | u8 *prefix, int prefix_size) | 80 | const char *prefix) |
| 81 | { | 81 | { |
| 82 | u8 *full_key_descriptor; | 82 | char *description; |
| 83 | struct key *keyring_key; | 83 | struct key *keyring_key; |
| 84 | struct fscrypt_key *master_key; | 84 | struct fscrypt_key *master_key; |
| 85 | const struct user_key_payload *ukp; | 85 | const struct user_key_payload *ukp; |
| 86 | int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1; | ||
| 87 | int res; | 86 | int res; |
| 88 | 87 | ||
| 89 | full_key_descriptor = kmalloc(full_key_len, GFP_NOFS); | 88 | description = kasprintf(GFP_NOFS, "%s%*phN", prefix, |
| 90 | if (!full_key_descriptor) | 89 | FS_KEY_DESCRIPTOR_SIZE, |
| 90 | ctx->master_key_descriptor); | ||
| 91 | if (!description) | ||
| 91 | return -ENOMEM; | 92 | return -ENOMEM; |
| 92 | 93 | ||
| 93 | memcpy(full_key_descriptor, prefix, prefix_size); | 94 | keyring_key = request_key(&key_type_logon, description, NULL); |
| 94 | sprintf(full_key_descriptor + prefix_size, | 95 | kfree(description); |
| 95 | "%*phN", FS_KEY_DESCRIPTOR_SIZE, | ||
| 96 | ctx->master_key_descriptor); | ||
| 97 | full_key_descriptor[full_key_len - 1] = '\0'; | ||
| 98 | keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); | ||
| 99 | kfree(full_key_descriptor); | ||
| 100 | if (IS_ERR(keyring_key)) | 96 | if (IS_ERR(keyring_key)) |
| 101 | return PTR_ERR(keyring_key); | 97 | return PTR_ERR(keyring_key); |
| 102 | 98 | ||
| @@ -206,12 +202,15 @@ retry: | |||
| 206 | 202 | ||
| 207 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); | 203 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); |
| 208 | if (res < 0) { | 204 | if (res < 0) { |
| 209 | if (!fscrypt_dummy_context_enabled(inode)) | 205 | if (!fscrypt_dummy_context_enabled(inode) || |
| 206 | inode->i_sb->s_cop->is_encrypted(inode)) | ||
| 210 | return res; | 207 | return res; |
| 208 | /* Fake up a context for an unencrypted directory */ | ||
| 209 | memset(&ctx, 0, sizeof(ctx)); | ||
| 211 | ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; | 210 | ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; |
| 212 | ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; | 211 | ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; |
| 213 | ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; | 212 | ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; |
| 214 | ctx.flags = 0; | 213 | memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); |
| 215 | } else if (res != sizeof(ctx)) { | 214 | } else if (res != sizeof(ctx)) { |
| 216 | return -EINVAL; | 215 | return -EINVAL; |
| 217 | } | 216 | } |
| @@ -247,20 +246,10 @@ retry: | |||
| 247 | if (!raw_key) | 246 | if (!raw_key) |
| 248 | goto out; | 247 | goto out; |
| 249 | 248 | ||
| 250 | if (fscrypt_dummy_context_enabled(inode)) { | 249 | res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX); |
| 251 | memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); | ||
| 252 | goto got_key; | ||
| 253 | } | ||
| 254 | |||
| 255 | res = validate_user_key(crypt_info, &ctx, raw_key, | ||
| 256 | FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE); | ||
| 257 | if (res && inode->i_sb->s_cop->key_prefix) { | 250 | if (res && inode->i_sb->s_cop->key_prefix) { |
| 258 | u8 *prefix = NULL; | 251 | int res2 = validate_user_key(crypt_info, &ctx, raw_key, |
| 259 | int prefix_size, res2; | 252 | inode->i_sb->s_cop->key_prefix); |
| 260 | |||
| 261 | prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix); | ||
| 262 | res2 = validate_user_key(crypt_info, &ctx, raw_key, | ||
| 263 | prefix, prefix_size); | ||
| 264 | if (res2) { | 253 | if (res2) { |
| 265 | if (res2 == -ENOKEY) | 254 | if (res2 == -ENOKEY) |
| 266 | res = -ENOKEY; | 255 | res = -ENOKEY; |
| @@ -269,7 +258,6 @@ retry: | |||
| 269 | } else if (res) { | 258 | } else if (res) { |
| 270 | goto out; | 259 | goto out; |
| 271 | } | 260 | } |
| 272 | got_key: | ||
| 273 | ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); | 261 | ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); |
| 274 | if (!ctfm || IS_ERR(ctfm)) { | 262 | if (!ctfm || IS_ERR(ctfm)) { |
| 275 | res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; | 263 | res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; |
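With validate_user_key() building the description via kasprintf("%s%*phN", ...), the logon key it requests is named by the prefix followed by the 8-byte master key descriptor as 16 hex digits, for example "fscrypt:0123456789abcdef" (the descriptor here is a placeholder). A hedged userspace sketch using keyutils, assuming the mainline struct fscrypt_key layout from <linux/fs.h>:

/* Illustrative only; link with -lkeyutils. */
#include <keyutils.h>
#include <linux/fs.h>		/* struct fscrypt_key */
#include <string.h>

static key_serial_t add_fscrypt_master_key(const unsigned char raw[64])
{
	struct fscrypt_key key = { .mode = 0, .size = 64 };

	memcpy(key.raw, raw, sizeof(key.raw));
	return add_key("logon", "fscrypt:0123456789abcdef",	/* prefix + %*phN */
		       &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
}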
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 6ed7c2eebeec..14b76da71269 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c | |||
| @@ -13,37 +13,20 @@ | |||
| 13 | #include <linux/mount.h> | 13 | #include <linux/mount.h> |
| 14 | #include "fscrypt_private.h" | 14 | #include "fscrypt_private.h" |
| 15 | 15 | ||
| 16 | static int inode_has_encryption_context(struct inode *inode) | ||
| 17 | { | ||
| 18 | if (!inode->i_sb->s_cop->get_context) | ||
| 19 | return 0; | ||
| 20 | return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0); | ||
| 21 | } | ||
| 22 | |||
| 23 | /* | 16 | /* |
| 24 | * check whether the policy is consistent with the encryption context | 17 | * check whether an encryption policy is consistent with an encryption context |
| 25 | * for the inode | ||
| 26 | */ | 18 | */ |
| 27 | static int is_encryption_context_consistent_with_policy(struct inode *inode, | 19 | static bool is_encryption_context_consistent_with_policy( |
| 20 | const struct fscrypt_context *ctx, | ||
| 28 | const struct fscrypt_policy *policy) | 21 | const struct fscrypt_policy *policy) |
| 29 | { | 22 | { |
| 30 | struct fscrypt_context ctx; | 23 | return memcmp(ctx->master_key_descriptor, policy->master_key_descriptor, |
| 31 | int res; | 24 | FS_KEY_DESCRIPTOR_SIZE) == 0 && |
| 32 | 25 | (ctx->flags == policy->flags) && | |
| 33 | if (!inode->i_sb->s_cop->get_context) | 26 | (ctx->contents_encryption_mode == |
| 34 | return 0; | 27 | policy->contents_encryption_mode) && |
| 35 | 28 | (ctx->filenames_encryption_mode == | |
| 36 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); | 29 | policy->filenames_encryption_mode); |
| 37 | if (res != sizeof(ctx)) | ||
| 38 | return 0; | ||
| 39 | |||
| 40 | return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, | ||
| 41 | FS_KEY_DESCRIPTOR_SIZE) == 0 && | ||
| 42 | (ctx.flags == policy->flags) && | ||
| 43 | (ctx.contents_encryption_mode == | ||
| 44 | policy->contents_encryption_mode) && | ||
| 45 | (ctx.filenames_encryption_mode == | ||
| 46 | policy->filenames_encryption_mode)); | ||
| 47 | } | 30 | } |
| 48 | 31 | ||
| 49 | static int create_encryption_context_from_policy(struct inode *inode, | 32 | static int create_encryption_context_from_policy(struct inode *inode, |
| @@ -66,20 +49,12 @@ static int create_encryption_context_from_policy(struct inode *inode, | |||
| 66 | FS_KEY_DESCRIPTOR_SIZE); | 49 | FS_KEY_DESCRIPTOR_SIZE); |
| 67 | 50 | ||
| 68 | if (!fscrypt_valid_contents_enc_mode( | 51 | if (!fscrypt_valid_contents_enc_mode( |
| 69 | policy->contents_encryption_mode)) { | 52 | policy->contents_encryption_mode)) |
| 70 | printk(KERN_WARNING | ||
| 71 | "%s: Invalid contents encryption mode %d\n", __func__, | ||
| 72 | policy->contents_encryption_mode); | ||
| 73 | return -EINVAL; | 53 | return -EINVAL; |
| 74 | } | ||
| 75 | 54 | ||
| 76 | if (!fscrypt_valid_filenames_enc_mode( | 55 | if (!fscrypt_valid_filenames_enc_mode( |
| 77 | policy->filenames_encryption_mode)) { | 56 | policy->filenames_encryption_mode)) |
| 78 | printk(KERN_WARNING | ||
| 79 | "%s: Invalid filenames encryption mode %d\n", __func__, | ||
| 80 | policy->filenames_encryption_mode); | ||
| 81 | return -EINVAL; | 57 | return -EINVAL; |
| 82 | } | ||
| 83 | 58 | ||
| 84 | if (policy->flags & ~FS_POLICY_FLAGS_VALID) | 59 | if (policy->flags & ~FS_POLICY_FLAGS_VALID) |
| 85 | return -EINVAL; | 60 | return -EINVAL; |
| @@ -98,6 +73,7 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) | |||
| 98 | struct fscrypt_policy policy; | 73 | struct fscrypt_policy policy; |
| 99 | struct inode *inode = file_inode(filp); | 74 | struct inode *inode = file_inode(filp); |
| 100 | int ret; | 75 | int ret; |
| 76 | struct fscrypt_context ctx; | ||
| 101 | 77 | ||
| 102 | if (copy_from_user(&policy, arg, sizeof(policy))) | 78 | if (copy_from_user(&policy, arg, sizeof(policy))) |
| 103 | return -EFAULT; | 79 | return -EFAULT; |
| @@ -114,9 +90,10 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) | |||
| 114 | 90 | ||
| 115 | inode_lock(inode); | 91 | inode_lock(inode); |
| 116 | 92 | ||
| 117 | if (!inode_has_encryption_context(inode)) { | 93 | ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); |
| 94 | if (ret == -ENODATA) { | ||
| 118 | if (!S_ISDIR(inode->i_mode)) | 95 | if (!S_ISDIR(inode->i_mode)) |
| 119 | ret = -EINVAL; | 96 | ret = -ENOTDIR; |
| 120 | else if (!inode->i_sb->s_cop->empty_dir) | 97 | else if (!inode->i_sb->s_cop->empty_dir) |
| 121 | ret = -EOPNOTSUPP; | 98 | ret = -EOPNOTSUPP; |
| 122 | else if (!inode->i_sb->s_cop->empty_dir(inode)) | 99 | else if (!inode->i_sb->s_cop->empty_dir(inode)) |
| @@ -124,12 +101,14 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) | |||
| 124 | else | 101 | else |
| 125 | ret = create_encryption_context_from_policy(inode, | 102 | ret = create_encryption_context_from_policy(inode, |
| 126 | &policy); | 103 | &policy); |
| 127 | } else if (!is_encryption_context_consistent_with_policy(inode, | 104 | } else if (ret == sizeof(ctx) && |
| 128 | &policy)) { | 105 | is_encryption_context_consistent_with_policy(&ctx, |
| 129 | printk(KERN_WARNING | 106 | &policy)) { |
| 130 | "%s: Policy inconsistent with encryption context\n", | 107 | /* The file already uses the same encryption policy. */ |
| 131 | __func__); | 108 | ret = 0; |
| 132 | ret = -EINVAL; | 109 | } else if (ret >= 0 || ret == -ERANGE) { |
| 110 | /* The file already uses a different encryption policy. */ | ||
| 111 | ret = -EEXIST; | ||
| 133 | } | 112 | } |
| 134 | 113 | ||
| 135 | inode_unlock(inode); | 114 | inode_unlock(inode); |
| @@ -151,8 +130,10 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) | |||
| 151 | return -ENODATA; | 130 | return -ENODATA; |
| 152 | 131 | ||
| 153 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); | 132 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); |
| 133 | if (res < 0 && res != -ERANGE) | ||
| 134 | return res; | ||
| 154 | if (res != sizeof(ctx)) | 135 | if (res != sizeof(ctx)) |
| 155 | return -ENODATA; | 136 | return -EINVAL; |
| 156 | if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) | 137 | if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) |
| 157 | return -EINVAL; | 138 | return -EINVAL; |
| 158 | 139 | ||
| @@ -179,6 +160,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) | |||
| 179 | BUG_ON(1); | 160 | BUG_ON(1); |
| 180 | } | 161 | } |
| 181 | 162 | ||
| 163 | /* No restrictions on file types which are never encrypted */ | ||
| 164 | if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && | ||
| 165 | !S_ISLNK(child->i_mode)) | ||
| 166 | return 1; | ||
| 167 | |||
| 182 | /* no restrictions if the parent directory is not encrypted */ | 168 | /* no restrictions if the parent directory is not encrypted */ |
| 183 | if (!parent->i_sb->s_cop->is_encrypted(parent)) | 169 | if (!parent->i_sb->s_cop->is_encrypted(parent)) |
| 184 | return 1; | 170 | return 1; |
| @@ -212,9 +198,9 @@ EXPORT_SYMBOL(fscrypt_has_permitted_context); | |||
| 212 | * @parent: Parent inode from which the context is inherited. | 198 | * @parent: Parent inode from which the context is inherited. |
| 213 | * @child: Child inode that inherits the context from @parent. | 199 | * @child: Child inode that inherits the context from @parent. |
| 214 | * @fs_data: private data given by FS. | 200 | * @fs_data: private data given by FS. |
| 215 | * @preload: preload child i_crypt_info | 201 | * @preload: preload child i_crypt_info if true |
| 216 | * | 202 | * |
| 217 | * Return: Zero on success, non-zero otherwise | 203 | * Return: 0 on success, -errno on failure |
| 218 | */ | 204 | */ |
| 219 | int fscrypt_inherit_context(struct inode *parent, struct inode *child, | 205 | int fscrypt_inherit_context(struct inode *parent, struct inode *child, |
| 220 | void *fs_data, bool preload) | 206 | void *fs_data, bool preload) |
| @@ -235,19 +221,11 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, | |||
| 235 | return -ENOKEY; | 221 | return -ENOKEY; |
| 236 | 222 | ||
| 237 | ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; | 223 | ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; |
| 238 | if (fscrypt_dummy_context_enabled(parent)) { | 224 | ctx.contents_encryption_mode = ci->ci_data_mode; |
| 239 | ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; | 225 | ctx.filenames_encryption_mode = ci->ci_filename_mode; |
| 240 | ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; | 226 | ctx.flags = ci->ci_flags; |
| 241 | ctx.flags = 0; | 227 | memcpy(ctx.master_key_descriptor, ci->ci_master_key, |
| 242 | memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); | 228 | FS_KEY_DESCRIPTOR_SIZE); |
| 243 | res = 0; | ||
| 244 | } else { | ||
| 245 | ctx.contents_encryption_mode = ci->ci_data_mode; | ||
| 246 | ctx.filenames_encryption_mode = ci->ci_filename_mode; | ||
| 247 | ctx.flags = ci->ci_flags; | ||
| 248 | memcpy(ctx.master_key_descriptor, ci->ci_master_key, | ||
| 249 | FS_KEY_DESCRIPTOR_SIZE); | ||
| 250 | } | ||
| 251 | get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); | 229 | get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); |
| 252 | res = parent->i_sb->s_cop->set_context(child, &ctx, | 230 | res = parent->i_sb->s_cop->set_context(child, &ctx, |
| 253 | sizeof(ctx), fs_data); | 231 | sizeof(ctx), fs_data); |
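With the rework above, FS_IOC_SET_ENCRYPTION_POLICY returns 0 when the directory already carries the identical policy, -EEXIST when it carries a different one, and -ENOTDIR when the target without a context is not a directory. A hedged userspace sketch follows; the struct and mode constants are the mainline UAPI from <linux/fs.h>, and the key descriptor is a placeholder.

/* Illustrative only. */
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_SET_ENCRYPTION_POLICY, struct fscrypt_policy */
#include <string.h>

static int set_dir_policy(int dirfd, const unsigned char desc[8])
{
	struct fscrypt_policy policy = {
		.version = 0,
		.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
		.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
		.flags = 0,
	};

	memcpy(policy.master_key_descriptor, desc,
	       sizeof(policy.master_key_descriptor));
	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}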
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
| @@ -691,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, | |||
| 691 | pgoff_t index, unsigned long pfn) | 691 | pgoff_t index, unsigned long pfn) |
| 692 | { | 692 | { |
| 693 | struct vm_area_struct *vma; | 693 | struct vm_area_struct *vma; |
| 694 | pte_t *ptep; | 694 | pte_t pte, *ptep = NULL; |
| 695 | pte_t pte; | 695 | pmd_t *pmdp = NULL; |
| 696 | spinlock_t *ptl; | 696 | spinlock_t *ptl; |
| 697 | bool changed; | 697 | bool changed; |
| 698 | 698 | ||
| @@ -707,21 +707,42 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, | |||
| 707 | 707 | ||
| 708 | address = pgoff_address(index, vma); | 708 | address = pgoff_address(index, vma); |
| 709 | changed = false; | 709 | changed = false; |
| 710 | if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) | 710 | if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl)) |
| 711 | continue; | 711 | continue; |
| 712 | if (pfn != pte_pfn(*ptep)) | ||
| 713 | goto unlock; | ||
| 714 | if (!pte_dirty(*ptep) && !pte_write(*ptep)) | ||
| 715 | goto unlock; | ||
| 716 | 712 | ||
| 717 | flush_cache_page(vma, address, pfn); | 713 | if (pmdp) { |
| 718 | pte = ptep_clear_flush(vma, address, ptep); | 714 | #ifdef CONFIG_FS_DAX_PMD |
| 719 | pte = pte_wrprotect(pte); | 715 | pmd_t pmd; |
| 720 | pte = pte_mkclean(pte); | 716 | |
| 721 | set_pte_at(vma->vm_mm, address, ptep, pte); | 717 | if (pfn != pmd_pfn(*pmdp)) |
| 722 | changed = true; | 718 | goto unlock_pmd; |
| 723 | unlock: | 719 | if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) |
| 724 | pte_unmap_unlock(ptep, ptl); | 720 | goto unlock_pmd; |
| 721 | |||
| 722 | flush_cache_page(vma, address, pfn); | ||
| 723 | pmd = pmdp_huge_clear_flush(vma, address, pmdp); | ||
| 724 | pmd = pmd_wrprotect(pmd); | ||
| 725 | pmd = pmd_mkclean(pmd); | ||
| 726 | set_pmd_at(vma->vm_mm, address, pmdp, pmd); | ||
| 727 | changed = true; | ||
| 728 | unlock_pmd: | ||
| 729 | spin_unlock(ptl); | ||
| 730 | #endif | ||
| 731 | } else { | ||
| 732 | if (pfn != pte_pfn(*ptep)) | ||
| 733 | goto unlock_pte; | ||
| 734 | if (!pte_dirty(*ptep) && !pte_write(*ptep)) | ||
| 735 | goto unlock_pte; | ||
| 736 | |||
| 737 | flush_cache_page(vma, address, pfn); | ||
| 738 | pte = ptep_clear_flush(vma, address, ptep); | ||
| 739 | pte = pte_wrprotect(pte); | ||
| 740 | pte = pte_mkclean(pte); | ||
| 741 | set_pte_at(vma->vm_mm, address, ptep, pte); | ||
| 742 | changed = true; | ||
| 743 | unlock_pte: | ||
| 744 | pte_unmap_unlock(ptep, ptl); | ||
| 745 | } | ||
| 725 | 746 | ||
| 726 | if (changed) | 747 | if (changed) |
| 727 | mmu_notifier_invalidate_page(vma->vm_mm, address); | 748 | mmu_notifier_invalidate_page(vma->vm_mm, address); |
| @@ -969,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector, | |||
| 969 | } | 990 | } |
| 970 | EXPORT_SYMBOL_GPL(__dax_zero_page_range); | 991 | EXPORT_SYMBOL_GPL(__dax_zero_page_range); |
| 971 | 992 | ||
| 972 | #ifdef CONFIG_FS_IOMAP | ||
| 973 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) | 993 | static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) |
| 974 | { | 994 | { |
| 975 | return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); | 995 | return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); |
| @@ -1011,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |||
| 1011 | struct blk_dax_ctl dax = { 0 }; | 1031 | struct blk_dax_ctl dax = { 0 }; |
| 1012 | ssize_t map_len; | 1032 | ssize_t map_len; |
| 1013 | 1033 | ||
| 1034 | if (fatal_signal_pending(current)) { | ||
| 1035 | ret = -EINTR; | ||
| 1036 | break; | ||
| 1037 | } | ||
| 1038 | |||
| 1014 | dax.sector = dax_iomap_sector(iomap, pos); | 1039 | dax.sector = dax_iomap_sector(iomap, pos); |
| 1015 | dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; | 1040 | dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; |
| 1016 | map_len = dax_map_atomic(iomap->bdev, &dax); | 1041 | map_len = dax_map_atomic(iomap->bdev, &dax); |
| @@ -1061,8 +1086,12 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 1061 | loff_t pos = iocb->ki_pos, ret = 0, done = 0; | 1086 | loff_t pos = iocb->ki_pos, ret = 0, done = 0; |
| 1062 | unsigned flags = 0; | 1087 | unsigned flags = 0; |
| 1063 | 1088 | ||
| 1064 | if (iov_iter_rw(iter) == WRITE) | 1089 | if (iov_iter_rw(iter) == WRITE) { |
| 1090 | lockdep_assert_held_exclusive(&inode->i_rwsem); | ||
| 1065 | flags |= IOMAP_WRITE; | 1091 | flags |= IOMAP_WRITE; |
| 1092 | } else { | ||
| 1093 | lockdep_assert_held(&inode->i_rwsem); | ||
| 1094 | } | ||
| 1066 | 1095 | ||
| 1067 | while (iov_iter_count(iter)) { | 1096 | while (iov_iter_count(iter)) { |
| 1068 | ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, | 1097 | ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, |
| @@ -1407,4 +1436,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 1407 | } | 1436 | } |
| 1408 | EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); | 1437 | EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); |
| 1409 | #endif /* CONFIG_FS_DAX_PMD */ | 1438 | #endif /* CONFIG_FS_DAX_PMD */ |
| 1410 | #endif /* CONFIG_FS_IOMAP */ | ||
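The fs/dax.c hunks above add lockdep assertions that document dax_iomap_rw()'s locking contract: callers hold i_rwsem shared for reads and exclusive for writes, which the ext4_dax_write_iter() hunk below relies on. A minimal sketch of a conforming read path; my_dax_read_iter() and my_iomap_ops are illustrative stand-ins, not part of this patch.

/* Illustrative sketch only. */
static ssize_t my_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);	/* satisfies lockdep_assert_held(&inode->i_rwsem) */
	ret = dax_iomap_rw(iocb, to, &my_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}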
diff --git a/fs/dcache.c b/fs/dcache.c index 769903dbc19d..95d71eda8142 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -1336,8 +1336,11 @@ int d_set_mounted(struct dentry *dentry) | |||
| 1336 | } | 1336 | } |
| 1337 | spin_lock(&dentry->d_lock); | 1337 | spin_lock(&dentry->d_lock); |
| 1338 | if (!d_unlinked(dentry)) { | 1338 | if (!d_unlinked(dentry)) { |
| 1339 | dentry->d_flags |= DCACHE_MOUNTED; | 1339 | ret = -EBUSY; |
| 1340 | ret = 0; | 1340 | if (!d_mountpoint(dentry)) { |
| 1341 | dentry->d_flags |= DCACHE_MOUNTED; | ||
| 1342 | ret = 0; | ||
| 1343 | } | ||
| 1341 | } | 1344 | } |
| 1342 | spin_unlock(&dentry->d_lock); | 1345 | spin_unlock(&dentry->d_lock); |
| 1343 | out: | 1346 | out: |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index f17fcf89e18e..7fb1732a3630 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
| @@ -248,6 +248,42 @@ static struct file_system_type debug_fs_type = { | |||
| 248 | }; | 248 | }; |
| 249 | MODULE_ALIAS_FS("debugfs"); | 249 | MODULE_ALIAS_FS("debugfs"); |
| 250 | 250 | ||
| 251 | /** | ||
| 252 | * debugfs_lookup() - look up an existing debugfs file | ||
| 253 | * @name: a pointer to a string containing the name of the file to look up. | ||
| 254 | * @parent: a pointer to the parent dentry of the file. | ||
| 255 | * | ||
| 256 | * This function will return a pointer to a dentry if it succeeds. If the file | ||
| 257 | * doesn't exist or an error occurs, %NULL will be returned. The returned | ||
| 258 | * dentry must be passed to dput() when it is no longer needed. | ||
| 259 | * | ||
| 260 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | ||
| 261 | * returned. | ||
| 262 | */ | ||
| 263 | struct dentry *debugfs_lookup(const char *name, struct dentry *parent) | ||
| 264 | { | ||
| 265 | struct dentry *dentry; | ||
| 266 | |||
| 267 | if (IS_ERR(parent)) | ||
| 268 | return NULL; | ||
| 269 | |||
| 270 | if (!parent) | ||
| 271 | parent = debugfs_mount->mnt_root; | ||
| 272 | |||
| 273 | inode_lock(d_inode(parent)); | ||
| 274 | dentry = lookup_one_len(name, parent, strlen(name)); | ||
| 275 | inode_unlock(d_inode(parent)); | ||
| 276 | |||
| 277 | if (IS_ERR(dentry)) | ||
| 278 | return NULL; | ||
| 279 | if (!d_really_is_positive(dentry)) { | ||
| 280 | dput(dentry); | ||
| 281 | return NULL; | ||
| 282 | } | ||
| 283 | return dentry; | ||
| 284 | } | ||
| 285 | EXPORT_SYMBOL_GPL(debugfs_lookup); | ||
| 286 | |||
| 251 | static struct dentry *start_creating(const char *name, struct dentry *parent) | 287 | static struct dentry *start_creating(const char *name, struct dentry *parent) |
| 252 | { | 288 | { |
| 253 | struct dentry *dentry; | 289 | struct dentry *dentry; |
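A short usage sketch for the new debugfs_lookup() helper documented above: per the kerneldoc, the returned dentry is an extra reference that must be dropped with dput(). The "stats" name and my_dir variable are placeholders.

/* Illustrative only. */
static bool my_stats_file_exists(struct dentry *my_dir)
{
	struct dentry *dentry = debugfs_lookup("stats", my_dir);

	if (!dentry)
		return false;
	dput(dentry);
	return true;
}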
diff --git a/fs/direct-io.c b/fs/direct-io.c index aeae8c063451..c87bae4376b8 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, | |||
| 906 | struct buffer_head *map_bh) | 906 | struct buffer_head *map_bh) |
| 907 | { | 907 | { |
| 908 | const unsigned blkbits = sdio->blkbits; | 908 | const unsigned blkbits = sdio->blkbits; |
| 909 | const unsigned i_blkbits = blkbits + sdio->blkfactor; | ||
| 909 | int ret = 0; | 910 | int ret = 0; |
| 910 | 911 | ||
| 911 | while (sdio->block_in_file < sdio->final_block_in_request) { | 912 | while (sdio->block_in_file < sdio->final_block_in_request) { |
| @@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, | |||
| 949 | clean_bdev_aliases( | 950 | clean_bdev_aliases( |
| 950 | map_bh->b_bdev, | 951 | map_bh->b_bdev, |
| 951 | map_bh->b_blocknr, | 952 | map_bh->b_blocknr, |
| 952 | map_bh->b_size >> blkbits); | 953 | map_bh->b_size >> i_blkbits); |
| 953 | } | 954 | } |
| 954 | 955 | ||
| 955 | if (!sdio->blkfactor) | 956 | if (!sdio->blkfactor) |
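A worked example of the unit fix above (numbers are illustrative): map_bh->b_size is in bytes and get_block() maps filesystem blocks, so the count handed to clean_bdev_aliases() has to be scaled by i_blkbits rather than by the possibly smaller direct-I/O block size.

/*
 * 512-byte O_DIRECT blocks on a 4096-byte-block filesystem:
 *   blkbits = 9, sdio->blkfactor = 3, i_blkbits = 9 + 3 = 12
 * For a mapping with b_size = 8192 bytes (two fs blocks):
 *   b_size >> i_blkbits = 2   (correct count of fs blocks to clean)
 *   b_size >> blkbits   = 16  (previous code: sixteen "blocks", far too many)
 */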
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index 5e6a2c0a1f0b..1f7d5e46cdda 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c | |||
| @@ -122,7 +122,7 @@ void exofs_sysfs_dbg_print(void) | |||
| 122 | list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) { | 122 | list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) { |
| 123 | printk(KERN_INFO "%s: name %s ref %d\n", | 123 | printk(KERN_INFO "%s: name %s ref %d\n", |
| 124 | __func__, kobject_name(k_name), | 124 | __func__, kobject_name(k_name), |
| 125 | (int)atomic_read(&k_name->kref.refcount)); | 125 | (int)kref_read(&k_name->kref)); |
| 126 | } | 126 | } |
| 127 | #endif | 127 | #endif |
| 128 | } | 128 | } |
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig index 36bea5adcaba..c634874e12d9 100644 --- a/fs/ext2/Kconfig +++ b/fs/ext2/Kconfig | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | config EXT2_FS | 1 | config EXT2_FS |
| 2 | tristate "Second extended fs support" | 2 | tristate "Second extended fs support" |
| 3 | select FS_IOMAP if FS_DAX | ||
| 4 | help | 3 | help |
| 5 | Ext2 is a standard Linux file system for hard disks. | 4 | Ext2 is a standard Linux file system for hard disks. |
| 6 | 5 | ||
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 7b90691e98c4..e38039fd96ff 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig | |||
| @@ -37,7 +37,6 @@ config EXT4_FS | |||
| 37 | select CRC16 | 37 | select CRC16 |
| 38 | select CRYPTO | 38 | select CRYPTO |
| 39 | select CRYPTO_CRC32C | 39 | select CRYPTO_CRC32C |
| 40 | select FS_IOMAP if FS_DAX | ||
| 41 | help | 40 | help |
| 42 | This is the next generation of the ext3 filesystem. | 41 | This is the next generation of the ext3 filesystem. |
| 43 | 42 | ||
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2163c1e69f2a..01d52b98f9a7 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -32,7 +32,11 @@ | |||
| 32 | #include <linux/percpu_counter.h> | 32 | #include <linux/percpu_counter.h> |
| 33 | #include <linux/ratelimit.h> | 33 | #include <linux/ratelimit.h> |
| 34 | #include <crypto/hash.h> | 34 | #include <crypto/hash.h> |
| 35 | #include <linux/fscrypto.h> | 35 | #ifdef CONFIG_EXT4_FS_ENCRYPTION |
| 36 | #include <linux/fscrypt_supp.h> | ||
| 37 | #else | ||
| 38 | #include <linux/fscrypt_notsupp.h> | ||
| 39 | #endif | ||
| 36 | #include <linux/falloc.h> | 40 | #include <linux/falloc.h> |
| 37 | #include <linux/percpu-rwsem.h> | 41 | #include <linux/percpu-rwsem.h> |
| 38 | #ifdef __KERNEL__ | 42 | #ifdef __KERNEL__ |
| @@ -679,6 +683,16 @@ struct fsxattr { | |||
| 679 | #define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR | 683 | #define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR |
| 680 | #define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR | 684 | #define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR |
| 681 | 685 | ||
| 686 | #define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32) | ||
| 687 | |||
| 688 | /* | ||
| 689 | * Flags for going down operation | ||
| 690 | */ | ||
| 691 | #define EXT4_GOING_FLAGS_DEFAULT 0x0 /* going down */ | ||
| 692 | #define EXT4_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */ | ||
| 693 | #define EXT4_GOING_FLAGS_NOLOGFLUSH 0x2 /* flush neither log nor data */ | ||
| 694 | |||
| 695 | |||
| 682 | #if defined(__KERNEL__) && defined(CONFIG_COMPAT) | 696 | #if defined(__KERNEL__) && defined(CONFIG_COMPAT) |
| 683 | /* | 697 | /* |
| 684 | * ioctl commands in 32 bit emulation | 698 | * ioctl commands in 32 bit emulation |
| @@ -1343,11 +1357,6 @@ struct ext4_super_block { | |||
| 1343 | /* Number of quota types we support */ | 1357 | /* Number of quota types we support */ |
| 1344 | #define EXT4_MAXQUOTAS 3 | 1358 | #define EXT4_MAXQUOTAS 3 |
| 1345 | 1359 | ||
| 1346 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
| 1347 | #define EXT4_KEY_DESC_PREFIX "ext4:" | ||
| 1348 | #define EXT4_KEY_DESC_PREFIX_SIZE 5 | ||
| 1349 | #endif | ||
| 1350 | |||
| 1351 | /* | 1360 | /* |
| 1352 | * fourth extended-fs super-block data in memory | 1361 | * fourth extended-fs super-block data in memory |
| 1353 | */ | 1362 | */ |
| @@ -1404,8 +1413,7 @@ struct ext4_sb_info { | |||
| 1404 | struct journal_s *s_journal; | 1413 | struct journal_s *s_journal; |
| 1405 | struct list_head s_orphan; | 1414 | struct list_head s_orphan; |
| 1406 | struct mutex s_orphan_lock; | 1415 | struct mutex s_orphan_lock; |
| 1407 | unsigned long s_resize_flags; /* Flags indicating if there | 1416 | unsigned long s_ext4_flags; /* Ext4 superblock flags */ |
| 1408 | is a resizer */ | ||
| 1409 | unsigned long s_commit_interval; | 1417 | unsigned long s_commit_interval; |
| 1410 | u32 s_max_batch_time; | 1418 | u32 s_max_batch_time; |
| 1411 | u32 s_min_batch_time; | 1419 | u32 s_min_batch_time; |
| @@ -1517,12 +1525,6 @@ struct ext4_sb_info { | |||
| 1517 | 1525 | ||
| 1518 | /* Barrier between changing inodes' journal flags and writepages ops. */ | 1526 | /* Barrier between changing inodes' journal flags and writepages ops. */ |
| 1519 | struct percpu_rw_semaphore s_journal_flag_rwsem; | 1527 | struct percpu_rw_semaphore s_journal_flag_rwsem; |
| 1520 | |||
| 1521 | /* Encryption support */ | ||
| 1522 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
| 1523 | u8 key_prefix[EXT4_KEY_DESC_PREFIX_SIZE]; | ||
| 1524 | u8 key_prefix_size; | ||
| 1525 | #endif | ||
| 1526 | }; | 1528 | }; |
| 1527 | 1529 | ||
| 1528 | static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) | 1530 | static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) |
| @@ -1845,6 +1847,18 @@ static inline bool ext4_has_incompat_features(struct super_block *sb) | |||
| 1845 | } | 1847 | } |
| 1846 | 1848 | ||
| 1847 | /* | 1849 | /* |
| 1850 | * Superblock flags | ||
| 1851 | */ | ||
| 1852 | #define EXT4_FLAGS_RESIZING 0 | ||
| 1853 | #define EXT4_FLAGS_SHUTDOWN 1 | ||
| 1854 | |||
| 1855 | static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi) | ||
| 1856 | { | ||
| 1857 | return test_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags); | ||
| 1858 | } | ||
| 1859 | |||
| 1860 | |||
| 1861 | /* | ||
| 1848 | * Default values for user and/or group using reserved blocks | 1862 | * Default values for user and/or group using reserved blocks |
| 1849 | */ | 1863 | */ |
| 1850 | #define EXT4_DEF_RESUID 0 | 1864 | #define EXT4_DEF_RESUID 0 |
| @@ -2320,28 +2334,6 @@ static inline int ext4_fname_setup_filename(struct inode *dir, | |||
| 2320 | } | 2334 | } |
| 2321 | static inline void ext4_fname_free_filename(struct ext4_filename *fname) { } | 2335 | static inline void ext4_fname_free_filename(struct ext4_filename *fname) { } |
| 2322 | 2336 | ||
| 2323 | #define fscrypt_set_d_op(i) | ||
| 2324 | #define fscrypt_get_ctx fscrypt_notsupp_get_ctx | ||
| 2325 | #define fscrypt_release_ctx fscrypt_notsupp_release_ctx | ||
| 2326 | #define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page | ||
| 2327 | #define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page | ||
| 2328 | #define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages | ||
| 2329 | #define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page | ||
| 2330 | #define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page | ||
| 2331 | #define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range | ||
| 2332 | #define fscrypt_ioctl_set_policy fscrypt_notsupp_ioctl_set_policy | ||
| 2333 | #define fscrypt_ioctl_get_policy fscrypt_notsupp_ioctl_get_policy | ||
| 2334 | #define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context | ||
| 2335 | #define fscrypt_inherit_context fscrypt_notsupp_inherit_context | ||
| 2336 | #define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info | ||
| 2337 | #define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info | ||
| 2338 | #define fscrypt_setup_filename fscrypt_notsupp_setup_filename | ||
| 2339 | #define fscrypt_free_filename fscrypt_notsupp_free_filename | ||
| 2340 | #define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size | ||
| 2341 | #define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer | ||
| 2342 | #define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer | ||
| 2343 | #define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr | ||
| 2344 | #define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk | ||
| 2345 | #endif | 2337 | #endif |
| 2346 | 2338 | ||
| 2347 | /* dir.c */ | 2339 | /* dir.c */ |
| @@ -3034,7 +3026,7 @@ extern int ext4_inline_data_fiemap(struct inode *inode, | |||
| 3034 | extern int ext4_try_to_evict_inline_data(handle_t *handle, | 3026 | extern int ext4_try_to_evict_inline_data(handle_t *handle, |
| 3035 | struct inode *inode, | 3027 | struct inode *inode, |
| 3036 | int needed); | 3028 | int needed); |
| 3037 | extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline); | 3029 | extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline); |
| 3038 | 3030 | ||
| 3039 | extern int ext4_convert_inline_data(struct inode *inode); | 3031 | extern int ext4_convert_inline_data(struct inode *inode); |
| 3040 | 3032 | ||
| @@ -3228,7 +3220,6 @@ static inline void ext4_inode_resume_unlocked_dio(struct inode *inode) | |||
| 3228 | EXT4_WQ_HASH_SZ]) | 3220 | EXT4_WQ_HASH_SZ]) |
| 3229 | extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; | 3221 | extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; |
| 3230 | 3222 | ||
| 3231 | #define EXT4_RESIZING 0 | ||
| 3232 | extern int ext4_resize_begin(struct super_block *sb); | 3223 | extern int ext4_resize_begin(struct super_block *sb); |
| 3233 | extern void ext4_resize_end(struct super_block *sb); | 3224 | extern void ext4_resize_end(struct super_block *sb); |
| 3234 | 3225 | ||
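The new EXT4_IOC_SHUTDOWN ioctl takes one of the EXT4_GOING_FLAGS_* values by pointer; once the EXT4_FLAGS_SHUTDOWN bit is set, the checks added in the following hunks make most operations fail with -EIO. A hedged userspace sketch, with the constants copied from the ext4.h hunk above:

/* Illustrative only. */
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define EXT4_IOC_SHUTDOWN		_IOR('X', 125, __u32)
#define EXT4_GOING_FLAGS_LOGFLUSH	0x1	/* flush log but not data */

static int shutdown_ext4(int mountpoint_fd)
{
	__u32 flags = EXT4_GOING_FLAGS_LOGFLUSH;

	return ioctl(mountpoint_fd, EXT4_IOC_SHUTDOWN, &flags);
}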
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index e770c1ee4613..dd106b1d5d89 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
| @@ -43,6 +43,10 @@ static int ext4_journal_check_start(struct super_block *sb) | |||
| 43 | journal_t *journal; | 43 | journal_t *journal; |
| 44 | 44 | ||
| 45 | might_sleep(); | 45 | might_sleep(); |
| 46 | |||
| 47 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) | ||
| 48 | return -EIO; | ||
| 49 | |||
| 46 | if (sb->s_flags & MS_RDONLY) | 50 | if (sb->s_flags & MS_RDONLY) |
| 47 | return -EROFS; | 51 | return -EROFS; |
| 48 | WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); | 52 | WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); |
| @@ -161,6 +165,13 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line, | |||
| 161 | might_sleep(); | 165 | might_sleep(); |
| 162 | 166 | ||
| 163 | if (ext4_handle_valid(handle)) { | 167 | if (ext4_handle_valid(handle)) { |
| 168 | struct super_block *sb; | ||
| 169 | |||
| 170 | sb = handle->h_transaction->t_journal->j_private; | ||
| 171 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) { | ||
| 172 | jbd2_journal_abort_handle(handle); | ||
| 173 | return -EIO; | ||
| 174 | } | ||
| 164 | err = jbd2_journal_get_write_access(handle, bh); | 175 | err = jbd2_journal_get_write_access(handle, bh); |
| 165 | if (err) | 176 | if (err) |
| 166 | ext4_journal_abort_handle(where, line, __func__, bh, | 177 | ext4_journal_abort_handle(where, line, __func__, bh, |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 3e295d3350a9..2a97dff87b96 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -5334,7 +5334,8 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, | |||
| 5334 | ext4_lblk_t stop, *iterator, ex_start, ex_end; | 5334 | ext4_lblk_t stop, *iterator, ex_start, ex_end; |
| 5335 | 5335 | ||
| 5336 | /* Let path point to the last extent */ | 5336 | /* Let path point to the last extent */ |
| 5337 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); | 5337 | path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, |
| 5338 | EXT4_EX_NOCACHE); | ||
| 5338 | if (IS_ERR(path)) | 5339 | if (IS_ERR(path)) |
| 5339 | return PTR_ERR(path); | 5340 | return PTR_ERR(path); |
| 5340 | 5341 | ||
| @@ -5343,15 +5344,15 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, | |||
| 5343 | if (!extent) | 5344 | if (!extent) |
| 5344 | goto out; | 5345 | goto out; |
| 5345 | 5346 | ||
| 5346 | stop = le32_to_cpu(extent->ee_block) + | 5347 | stop = le32_to_cpu(extent->ee_block); |
| 5347 | ext4_ext_get_actual_len(extent); | ||
| 5348 | 5348 | ||
| 5349 | /* | 5349 | /* |
| 5350 | * In case of left shift, Don't start shifting extents until we make | 5350 | * In case of left shift, Don't start shifting extents until we make |
| 5351 | * sure the hole is big enough to accommodate the shift. | 5351 | * sure the hole is big enough to accommodate the shift. |
| 5352 | */ | 5352 | */ |
| 5353 | if (SHIFT == SHIFT_LEFT) { | 5353 | if (SHIFT == SHIFT_LEFT) { |
| 5354 | path = ext4_find_extent(inode, start - 1, &path, 0); | 5354 | path = ext4_find_extent(inode, start - 1, &path, |
| 5355 | EXT4_EX_NOCACHE); | ||
| 5355 | if (IS_ERR(path)) | 5356 | if (IS_ERR(path)) |
| 5356 | return PTR_ERR(path); | 5357 | return PTR_ERR(path); |
| 5357 | depth = path->p_depth; | 5358 | depth = path->p_depth; |
| @@ -5383,9 +5384,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, | |||
| 5383 | else | 5384 | else |
| 5384 | iterator = &stop; | 5385 | iterator = &stop; |
| 5385 | 5386 | ||
| 5386 | /* Its safe to start updating extents */ | 5387 | /* |
| 5387 | while (start < stop) { | 5388 | * It's safe to start updating extents. Start and stop are unsigned, so |
| 5388 | path = ext4_find_extent(inode, *iterator, &path, 0); | 5389 | * for a right shift, once the extent at block 0 is reached, the iterator |
| 5390 | * becomes NULL to indicate the end of the loop. | ||
| 5391 | */ | ||
| 5392 | while (iterator && start <= stop) { | ||
| 5393 | path = ext4_find_extent(inode, *iterator, &path, | ||
| 5394 | EXT4_EX_NOCACHE); | ||
| 5389 | if (IS_ERR(path)) | 5395 | if (IS_ERR(path)) |
| 5390 | return PTR_ERR(path); | 5396 | return PTR_ERR(path); |
| 5391 | depth = path->p_depth; | 5397 | depth = path->p_depth; |
| @@ -5412,8 +5418,11 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle, | |||
| 5412 | ext4_ext_get_actual_len(extent); | 5418 | ext4_ext_get_actual_len(extent); |
| 5413 | } else { | 5419 | } else { |
| 5414 | extent = EXT_FIRST_EXTENT(path[depth].p_hdr); | 5420 | extent = EXT_FIRST_EXTENT(path[depth].p_hdr); |
| 5415 | *iterator = le32_to_cpu(extent->ee_block) > 0 ? | 5421 | if (le32_to_cpu(extent->ee_block) > 0) |
| 5416 | le32_to_cpu(extent->ee_block) - 1 : 0; | 5422 | *iterator = le32_to_cpu(extent->ee_block) - 1; |
| 5423 | else | ||
| 5424 | /* Beginning is reached, end of the loop */ | ||
| 5425 | iterator = NULL; | ||
| 5417 | /* Update path extent in case we need to stop */ | 5426 | /* Update path extent in case we need to stop */ |
| 5418 | while (le32_to_cpu(extent->ee_block) < start) | 5427 | while (le32_to_cpu(extent->ee_block) < start) |
| 5419 | extent++; | 5428 | extent++; |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index d663d3d7c81c..87e11dfe3cde 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
| @@ -57,6 +57,9 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) | |||
| 57 | 57 | ||
| 58 | static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) | 58 | static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
| 59 | { | 59 | { |
| 60 | if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb)))) | ||
| 61 | return -EIO; | ||
| 62 | |||
| 60 | if (!iov_iter_count(to)) | 63 | if (!iov_iter_count(to)) |
| 61 | return 0; /* skip atime */ | 64 | return 0; /* skip atime */ |
| 62 | 65 | ||
| @@ -175,7 +178,6 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 175 | { | 178 | { |
| 176 | struct inode *inode = file_inode(iocb->ki_filp); | 179 | struct inode *inode = file_inode(iocb->ki_filp); |
| 177 | ssize_t ret; | 180 | ssize_t ret; |
| 178 | bool overwrite = false; | ||
| 179 | 181 | ||
| 180 | inode_lock(inode); | 182 | inode_lock(inode); |
| 181 | ret = ext4_write_checks(iocb, from); | 183 | ret = ext4_write_checks(iocb, from); |
| @@ -188,16 +190,9 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 188 | if (ret) | 190 | if (ret) |
| 189 | goto out; | 191 | goto out; |
| 190 | 192 | ||
| 191 | if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) { | ||
| 192 | overwrite = true; | ||
| 193 | downgrade_write(&inode->i_rwsem); | ||
| 194 | } | ||
| 195 | ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops); | 193 | ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops); |
| 196 | out: | 194 | out: |
| 197 | if (!overwrite) | 195 | inode_unlock(inode); |
| 198 | inode_unlock(inode); | ||
| 199 | else | ||
| 200 | inode_unlock_shared(inode); | ||
| 201 | if (ret > 0) | 196 | if (ret > 0) |
| 202 | ret = generic_write_sync(iocb, ret); | 197 | ret = generic_write_sync(iocb, ret); |
| 203 | return ret; | 198 | return ret; |
| @@ -213,6 +208,9 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 213 | int overwrite = 0; | 208 | int overwrite = 0; |
| 214 | ssize_t ret; | 209 | ssize_t ret; |
| 215 | 210 | ||
| 211 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 212 | return -EIO; | ||
| 213 | |||
| 216 | #ifdef CONFIG_FS_DAX | 214 | #ifdef CONFIG_FS_DAX |
| 217 | if (IS_DAX(inode)) | 215 | if (IS_DAX(inode)) |
| 218 | return ext4_dax_write_iter(iocb, from); | 216 | return ext4_dax_write_iter(iocb, from); |
| @@ -348,6 +346,9 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 348 | { | 346 | { |
| 349 | struct inode *inode = file->f_mapping->host; | 347 | struct inode *inode = file->f_mapping->host; |
| 350 | 348 | ||
| 349 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 350 | return -EIO; | ||
| 351 | |||
| 351 | if (ext4_encrypted_inode(inode)) { | 352 | if (ext4_encrypted_inode(inode)) { |
| 352 | int err = fscrypt_get_encryption_info(inode); | 353 | int err = fscrypt_get_encryption_info(inode); |
| 353 | if (err) | 354 | if (err) |
| @@ -375,6 +376,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
| 375 | char buf[64], *cp; | 376 | char buf[64], *cp; |
| 376 | int ret; | 377 | int ret; |
| 377 | 378 | ||
| 379 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 380 | return -EIO; | ||
| 381 | |||
| 378 | if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && | 382 | if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && |
| 379 | !(sb->s_flags & MS_RDONLY))) { | 383 | !(sb->s_flags & MS_RDONLY))) { |
| 380 | sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; | 384 | sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; |
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 88effb1053c7..9d549608fd30 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
| @@ -100,6 +100,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
| 100 | tid_t commit_tid; | 100 | tid_t commit_tid; |
| 101 | bool needs_barrier = false; | 101 | bool needs_barrier = false; |
| 102 | 102 | ||
| 103 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 104 | return -EIO; | ||
| 105 | |||
| 103 | J_ASSERT(ext4_journal_current_handle() == NULL); | 106 | J_ASSERT(ext4_journal_current_handle() == NULL); |
| 104 | 107 | ||
| 105 | trace_ext4_sync_file_enter(file, datasync); | 108 | trace_ext4_sync_file_enter(file, datasync); |
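The -EIO bail-outs added in file.c and fsync.c above, and repeated throughout the hunks below, all test one new predicate, ext4_forced_shutdown(). The ext4.h hunk that defines it is not part of this excerpt, so the following is only a sketch of the assumed shape, with the bit names taken from the resize.c and ioctl.c hunks later in this patch; treat the exact bit values as assumptions.

    /* Sketch only; the real definitions belong in fs/ext4/ext4.h. */
    #define EXT4_FLAGS_RESIZING    0    /* replaces s_resize_flags / EXT4_RESIZING */
    #define EXT4_FLAGS_SHUTDOWN    1    /* set by EXT4_IOC_SHUTDOWN */

    static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
    {
        return test_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
    }

Once the shutdown bit is set, each entry point that could dirty the filesystem returns -EIO early instead of touching the journal or on-disk state.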
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index e026aa941fd5..38b8a96eb97c 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c | |||
| @@ -10,7 +10,8 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
| 13 | #include <linux/cryptohash.h> | 13 | #include <linux/compiler.h> |
| 14 | #include <linux/bitops.h> | ||
| 14 | #include "ext4.h" | 15 | #include "ext4.h" |
| 15 | 16 | ||
| 16 | #define DELTA 0x9E3779B9 | 17 | #define DELTA 0x9E3779B9 |
| @@ -32,6 +33,74 @@ static void TEA_transform(__u32 buf[4], __u32 const in[]) | |||
| 32 | buf[1] += b1; | 33 | buf[1] += b1; |
| 33 | } | 34 | } |
| 34 | 35 | ||
| 36 | /* F, G and H are basic MD4 functions: selection, majority, parity */ | ||
| 37 | #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) | ||
| 38 | #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) | ||
| 39 | #define H(x, y, z) ((x) ^ (y) ^ (z)) | ||
| 40 | |||
| 41 | /* | ||
| 42 | * The generic round function. The application is so specific that | ||
| 43 | * we don't bother protecting all the arguments with parens, as is generally | ||
| 44 | * good macro practice, in favor of extra legibility. | ||
| 45 | * Rotation is separate from addition to prevent recomputation | ||
| 46 | */ | ||
| 47 | #define ROUND(f, a, b, c, d, x, s) \ | ||
| 48 | (a += f(b, c, d) + x, a = rol32(a, s)) | ||
| 49 | #define K1 0 | ||
| 50 | #define K2 013240474631UL | ||
| 51 | #define K3 015666365641UL | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Basic cut-down MD4 transform. Returns only 32 bits of result. | ||
| 55 | */ | ||
| 56 | static __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]) | ||
| 57 | { | ||
| 58 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | ||
| 59 | |||
| 60 | /* Round 1 */ | ||
| 61 | ROUND(F, a, b, c, d, in[0] + K1, 3); | ||
| 62 | ROUND(F, d, a, b, c, in[1] + K1, 7); | ||
| 63 | ROUND(F, c, d, a, b, in[2] + K1, 11); | ||
| 64 | ROUND(F, b, c, d, a, in[3] + K1, 19); | ||
| 65 | ROUND(F, a, b, c, d, in[4] + K1, 3); | ||
| 66 | ROUND(F, d, a, b, c, in[5] + K1, 7); | ||
| 67 | ROUND(F, c, d, a, b, in[6] + K1, 11); | ||
| 68 | ROUND(F, b, c, d, a, in[7] + K1, 19); | ||
| 69 | |||
| 70 | /* Round 2 */ | ||
| 71 | ROUND(G, a, b, c, d, in[1] + K2, 3); | ||
| 72 | ROUND(G, d, a, b, c, in[3] + K2, 5); | ||
| 73 | ROUND(G, c, d, a, b, in[5] + K2, 9); | ||
| 74 | ROUND(G, b, c, d, a, in[7] + K2, 13); | ||
| 75 | ROUND(G, a, b, c, d, in[0] + K2, 3); | ||
| 76 | ROUND(G, d, a, b, c, in[2] + K2, 5); | ||
| 77 | ROUND(G, c, d, a, b, in[4] + K2, 9); | ||
| 78 | ROUND(G, b, c, d, a, in[6] + K2, 13); | ||
| 79 | |||
| 80 | /* Round 3 */ | ||
| 81 | ROUND(H, a, b, c, d, in[3] + K3, 3); | ||
| 82 | ROUND(H, d, a, b, c, in[7] + K3, 9); | ||
| 83 | ROUND(H, c, d, a, b, in[2] + K3, 11); | ||
| 84 | ROUND(H, b, c, d, a, in[6] + K3, 15); | ||
| 85 | ROUND(H, a, b, c, d, in[1] + K3, 3); | ||
| 86 | ROUND(H, d, a, b, c, in[5] + K3, 9); | ||
| 87 | ROUND(H, c, d, a, b, in[0] + K3, 11); | ||
| 88 | ROUND(H, b, c, d, a, in[4] + K3, 15); | ||
| 89 | |||
| 90 | buf[0] += a; | ||
| 91 | buf[1] += b; | ||
| 92 | buf[2] += c; | ||
| 93 | buf[3] += d; | ||
| 94 | |||
| 95 | return buf[1]; /* "most hashed" word */ | ||
| 96 | } | ||
| 97 | #undef ROUND | ||
| 98 | #undef K1 | ||
| 99 | #undef K2 | ||
| 100 | #undef K3 | ||
| 101 | #undef F | ||
| 102 | #undef G | ||
| 103 | #undef H | ||
| 35 | 104 | ||
| 36 | /* The old legacy hash */ | 105 | /* The old legacy hash */ |
| 37 | static __u32 dx_hack_hash_unsigned(const char *name, int len) | 106 | static __u32 dx_hack_hash_unsigned(const char *name, int len) |
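The new half_md4_transform() gives ext4 a private copy of the helper it previously pulled in through <linux/cryptohash.h>, so the on-disk directory-index hash no longer depends on a generic library routine that might change. Its caller, ext4fs_dirhash(), sits later in this file and is outside the hunk; assuming the existing str2hashbuf_signed() packing helper, the hashing loop presumably runs roughly as in this sketch (the function name here is illustrative, not quoted from the patch).

    /* Illustrative only: fold a name into the hash, 32 bytes per round. */
    static __u32 half_md4_name_hash(const char *name, int len, const __u32 seed[4])
    {
        __u32 buf[4] = { seed[0], seed[1], seed[2], seed[3] };
        __u32 in[8];

        while (len > 0) {
            str2hashbuf_signed(name, len, in, 8);  /* pack the next chunk */
            half_md4_transform(buf, in);           /* mix it into the state */
            name += 32;
            len -= 32;
        }
        return buf[1];  /* the same "most hashed" word the transform returns */
    }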
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index e57e8d90ea54..b14bae2598bc 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
| @@ -764,6 +764,9 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, | |||
| 764 | if (!dir || !dir->i_nlink) | 764 | if (!dir || !dir->i_nlink) |
| 765 | return ERR_PTR(-EPERM); | 765 | return ERR_PTR(-EPERM); |
| 766 | 766 | ||
| 767 | if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) | ||
| 768 | return ERR_PTR(-EIO); | ||
| 769 | |||
| 767 | if ((ext4_encrypted_inode(dir) || | 770 | if ((ext4_encrypted_inode(dir) || |
| 768 | DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) && | 771 | DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) && |
| 769 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { | 772 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { |
| @@ -771,7 +774,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, | |||
| 771 | if (err) | 774 | if (err) |
| 772 | return ERR_PTR(err); | 775 | return ERR_PTR(err); |
| 773 | if (!fscrypt_has_encryption_key(dir)) | 776 | if (!fscrypt_has_encryption_key(dir)) |
| 774 | return ERR_PTR(-EPERM); | 777 | return ERR_PTR(-ENOKEY); |
| 775 | if (!handle) | 778 | if (!handle) |
| 776 | nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb); | 779 | nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb); |
| 777 | encrypt = 1; | 780 | encrypt = 1; |
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 437df6a1a841..30a9f210d1e3 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c | |||
| @@ -215,6 +215,9 @@ static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc, | |||
| 215 | struct ext4_inode *raw_inode; | 215 | struct ext4_inode *raw_inode; |
| 216 | int cp_len = 0; | 216 | int cp_len = 0; |
| 217 | 217 | ||
| 218 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 219 | return; | ||
| 220 | |||
| 218 | BUG_ON(!EXT4_I(inode)->i_inline_off); | 221 | BUG_ON(!EXT4_I(inode)->i_inline_off); |
| 219 | BUG_ON(pos + len > EXT4_I(inode)->i_inline_size); | 222 | BUG_ON(pos + len > EXT4_I(inode)->i_inline_size); |
| 220 | 223 | ||
| @@ -381,7 +384,7 @@ out: | |||
| 381 | static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, | 384 | static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, |
| 382 | unsigned int len) | 385 | unsigned int len) |
| 383 | { | 386 | { |
| 384 | int ret, size; | 387 | int ret, size, no_expand; |
| 385 | struct ext4_inode_info *ei = EXT4_I(inode); | 388 | struct ext4_inode_info *ei = EXT4_I(inode); |
| 386 | 389 | ||
| 387 | if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) | 390 | if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) |
| @@ -391,15 +394,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode, | |||
| 391 | if (size < len) | 394 | if (size < len) |
| 392 | return -ENOSPC; | 395 | return -ENOSPC; |
| 393 | 396 | ||
| 394 | down_write(&EXT4_I(inode)->xattr_sem); | 397 | ext4_write_lock_xattr(inode, &no_expand); |
| 395 | 398 | ||
| 396 | if (ei->i_inline_off) | 399 | if (ei->i_inline_off) |
| 397 | ret = ext4_update_inline_data(handle, inode, len); | 400 | ret = ext4_update_inline_data(handle, inode, len); |
| 398 | else | 401 | else |
| 399 | ret = ext4_create_inline_data(handle, inode, len); | 402 | ret = ext4_create_inline_data(handle, inode, len); |
| 400 | 403 | ||
| 401 | up_write(&EXT4_I(inode)->xattr_sem); | 404 | ext4_write_unlock_xattr(inode, &no_expand); |
| 402 | |||
| 403 | return ret; | 405 | return ret; |
| 404 | } | 406 | } |
| 405 | 407 | ||
| @@ -533,7 +535,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping, | |||
| 533 | struct inode *inode, | 535 | struct inode *inode, |
| 534 | unsigned flags) | 536 | unsigned flags) |
| 535 | { | 537 | { |
| 536 | int ret, needed_blocks; | 538 | int ret, needed_blocks, no_expand; |
| 537 | handle_t *handle = NULL; | 539 | handle_t *handle = NULL; |
| 538 | int retries = 0, sem_held = 0; | 540 | int retries = 0, sem_held = 0; |
| 539 | struct page *page = NULL; | 541 | struct page *page = NULL; |
| @@ -573,7 +575,7 @@ retry: | |||
| 573 | goto out; | 575 | goto out; |
| 574 | } | 576 | } |
| 575 | 577 | ||
| 576 | down_write(&EXT4_I(inode)->xattr_sem); | 578 | ext4_write_lock_xattr(inode, &no_expand); |
| 577 | sem_held = 1; | 579 | sem_held = 1; |
| 578 | /* If someone has already done this for us, just exit. */ | 580 | /* If someone has already done this for us, just exit. */ |
| 579 | if (!ext4_has_inline_data(inode)) { | 581 | if (!ext4_has_inline_data(inode)) { |
| @@ -610,7 +612,7 @@ retry: | |||
| 610 | put_page(page); | 612 | put_page(page); |
| 611 | page = NULL; | 613 | page = NULL; |
| 612 | ext4_orphan_add(handle, inode); | 614 | ext4_orphan_add(handle, inode); |
| 613 | up_write(&EXT4_I(inode)->xattr_sem); | 615 | ext4_write_unlock_xattr(inode, &no_expand); |
| 614 | sem_held = 0; | 616 | sem_held = 0; |
| 615 | ext4_journal_stop(handle); | 617 | ext4_journal_stop(handle); |
| 616 | handle = NULL; | 618 | handle = NULL; |
| @@ -636,7 +638,7 @@ out: | |||
| 636 | put_page(page); | 638 | put_page(page); |
| 637 | } | 639 | } |
| 638 | if (sem_held) | 640 | if (sem_held) |
| 639 | up_write(&EXT4_I(inode)->xattr_sem); | 641 | ext4_write_unlock_xattr(inode, &no_expand); |
| 640 | if (handle) | 642 | if (handle) |
| 641 | ext4_journal_stop(handle); | 643 | ext4_journal_stop(handle); |
| 642 | brelse(iloc.bh); | 644 | brelse(iloc.bh); |
| @@ -729,7 +731,7 @@ convert: | |||
| 729 | int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, | 731 | int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, |
| 730 | unsigned copied, struct page *page) | 732 | unsigned copied, struct page *page) |
| 731 | { | 733 | { |
| 732 | int ret; | 734 | int ret, no_expand; |
| 733 | void *kaddr; | 735 | void *kaddr; |
| 734 | struct ext4_iloc iloc; | 736 | struct ext4_iloc iloc; |
| 735 | 737 | ||
| @@ -747,7 +749,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, | |||
| 747 | goto out; | 749 | goto out; |
| 748 | } | 750 | } |
| 749 | 751 | ||
| 750 | down_write(&EXT4_I(inode)->xattr_sem); | 752 | ext4_write_lock_xattr(inode, &no_expand); |
| 751 | BUG_ON(!ext4_has_inline_data(inode)); | 753 | BUG_ON(!ext4_has_inline_data(inode)); |
| 752 | 754 | ||
| 753 | kaddr = kmap_atomic(page); | 755 | kaddr = kmap_atomic(page); |
| @@ -757,7 +759,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, | |||
| 757 | /* clear page dirty so that writepages wouldn't work for us. */ | 759 | /* clear page dirty so that writepages wouldn't work for us. */ |
| 758 | ClearPageDirty(page); | 760 | ClearPageDirty(page); |
| 759 | 761 | ||
| 760 | up_write(&EXT4_I(inode)->xattr_sem); | 762 | ext4_write_unlock_xattr(inode, &no_expand); |
| 761 | brelse(iloc.bh); | 763 | brelse(iloc.bh); |
| 762 | out: | 764 | out: |
| 763 | return copied; | 765 | return copied; |
| @@ -768,7 +770,7 @@ ext4_journalled_write_inline_data(struct inode *inode, | |||
| 768 | unsigned len, | 770 | unsigned len, |
| 769 | struct page *page) | 771 | struct page *page) |
| 770 | { | 772 | { |
| 771 | int ret; | 773 | int ret, no_expand; |
| 772 | void *kaddr; | 774 | void *kaddr; |
| 773 | struct ext4_iloc iloc; | 775 | struct ext4_iloc iloc; |
| 774 | 776 | ||
| @@ -778,11 +780,11 @@ ext4_journalled_write_inline_data(struct inode *inode, | |||
| 778 | return NULL; | 780 | return NULL; |
| 779 | } | 781 | } |
| 780 | 782 | ||
| 781 | down_write(&EXT4_I(inode)->xattr_sem); | 783 | ext4_write_lock_xattr(inode, &no_expand); |
| 782 | kaddr = kmap_atomic(page); | 784 | kaddr = kmap_atomic(page); |
| 783 | ext4_write_inline_data(inode, &iloc, kaddr, 0, len); | 785 | ext4_write_inline_data(inode, &iloc, kaddr, 0, len); |
| 784 | kunmap_atomic(kaddr); | 786 | kunmap_atomic(kaddr); |
| 785 | up_write(&EXT4_I(inode)->xattr_sem); | 787 | ext4_write_unlock_xattr(inode, &no_expand); |
| 786 | 788 | ||
| 787 | return iloc.bh; | 789 | return iloc.bh; |
| 788 | } | 790 | } |
| @@ -944,8 +946,15 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, | |||
| 944 | struct page *page) | 946 | struct page *page) |
| 945 | { | 947 | { |
| 946 | int i_size_changed = 0; | 948 | int i_size_changed = 0; |
| 949 | int ret; | ||
| 947 | 950 | ||
| 948 | copied = ext4_write_inline_data_end(inode, pos, len, copied, page); | 951 | ret = ext4_write_inline_data_end(inode, pos, len, copied, page); |
| 952 | if (ret < 0) { | ||
| 953 | unlock_page(page); | ||
| 954 | put_page(page); | ||
| 955 | return ret; | ||
| 956 | } | ||
| 957 | copied = ret; | ||
| 949 | 958 | ||
| 950 | /* | 959 | /* |
| 951 | * No need to use i_size_read() here, the i_size | 960 | * No need to use i_size_read() here, the i_size |
| @@ -1043,7 +1052,6 @@ static int ext4_add_dirent_to_inline(handle_t *handle, | |||
| 1043 | dir->i_mtime = dir->i_ctime = current_time(dir); | 1052 | dir->i_mtime = dir->i_ctime = current_time(dir); |
| 1044 | ext4_update_dx_flag(dir); | 1053 | ext4_update_dx_flag(dir); |
| 1045 | dir->i_version++; | 1054 | dir->i_version++; |
| 1046 | ext4_mark_inode_dirty(handle, dir); | ||
| 1047 | return 1; | 1055 | return 1; |
| 1048 | } | 1056 | } |
| 1049 | 1057 | ||
| @@ -1259,7 +1267,7 @@ out: | |||
| 1259 | int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, | 1267 | int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, |
| 1260 | struct inode *dir, struct inode *inode) | 1268 | struct inode *dir, struct inode *inode) |
| 1261 | { | 1269 | { |
| 1262 | int ret, inline_size; | 1270 | int ret, inline_size, no_expand; |
| 1263 | void *inline_start; | 1271 | void *inline_start; |
| 1264 | struct ext4_iloc iloc; | 1272 | struct ext4_iloc iloc; |
| 1265 | 1273 | ||
| @@ -1267,7 +1275,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, | |||
| 1267 | if (ret) | 1275 | if (ret) |
| 1268 | return ret; | 1276 | return ret; |
| 1269 | 1277 | ||
| 1270 | down_write(&EXT4_I(dir)->xattr_sem); | 1278 | ext4_write_lock_xattr(dir, &no_expand); |
| 1271 | if (!ext4_has_inline_data(dir)) | 1279 | if (!ext4_has_inline_data(dir)) |
| 1272 | goto out; | 1280 | goto out; |
| 1273 | 1281 | ||
| @@ -1312,8 +1320,8 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname, | |||
| 1312 | ret = ext4_convert_inline_data_nolock(handle, dir, &iloc); | 1320 | ret = ext4_convert_inline_data_nolock(handle, dir, &iloc); |
| 1313 | 1321 | ||
| 1314 | out: | 1322 | out: |
| 1323 | ext4_write_unlock_xattr(dir, &no_expand); | ||
| 1315 | ext4_mark_inode_dirty(handle, dir); | 1324 | ext4_mark_inode_dirty(handle, dir); |
| 1316 | up_write(&EXT4_I(dir)->xattr_sem); | ||
| 1317 | brelse(iloc.bh); | 1325 | brelse(iloc.bh); |
| 1318 | return ret; | 1326 | return ret; |
| 1319 | } | 1327 | } |
| @@ -1673,7 +1681,7 @@ int ext4_delete_inline_entry(handle_t *handle, | |||
| 1673 | struct buffer_head *bh, | 1681 | struct buffer_head *bh, |
| 1674 | int *has_inline_data) | 1682 | int *has_inline_data) |
| 1675 | { | 1683 | { |
| 1676 | int err, inline_size; | 1684 | int err, inline_size, no_expand; |
| 1677 | struct ext4_iloc iloc; | 1685 | struct ext4_iloc iloc; |
| 1678 | void *inline_start; | 1686 | void *inline_start; |
| 1679 | 1687 | ||
| @@ -1681,7 +1689,7 @@ int ext4_delete_inline_entry(handle_t *handle, | |||
| 1681 | if (err) | 1689 | if (err) |
| 1682 | return err; | 1690 | return err; |
| 1683 | 1691 | ||
| 1684 | down_write(&EXT4_I(dir)->xattr_sem); | 1692 | ext4_write_lock_xattr(dir, &no_expand); |
| 1685 | if (!ext4_has_inline_data(dir)) { | 1693 | if (!ext4_has_inline_data(dir)) { |
| 1686 | *has_inline_data = 0; | 1694 | *has_inline_data = 0; |
| 1687 | goto out; | 1695 | goto out; |
| @@ -1709,13 +1717,11 @@ int ext4_delete_inline_entry(handle_t *handle, | |||
| 1709 | if (err) | 1717 | if (err) |
| 1710 | goto out; | 1718 | goto out; |
| 1711 | 1719 | ||
| 1712 | err = ext4_mark_inode_dirty(handle, dir); | ||
| 1713 | if (unlikely(err)) | ||
| 1714 | goto out; | ||
| 1715 | |||
| 1716 | ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size); | 1720 | ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size); |
| 1717 | out: | 1721 | out: |
| 1718 | up_write(&EXT4_I(dir)->xattr_sem); | 1722 | ext4_write_unlock_xattr(dir, &no_expand); |
| 1723 | if (likely(err == 0)) | ||
| 1724 | err = ext4_mark_inode_dirty(handle, dir); | ||
| 1719 | brelse(iloc.bh); | 1725 | brelse(iloc.bh); |
| 1720 | if (err != -ENOENT) | 1726 | if (err != -ENOENT) |
| 1721 | ext4_std_error(dir->i_sb, err); | 1727 | ext4_std_error(dir->i_sb, err); |
| @@ -1814,11 +1820,11 @@ out: | |||
| 1814 | 1820 | ||
| 1815 | int ext4_destroy_inline_data(handle_t *handle, struct inode *inode) | 1821 | int ext4_destroy_inline_data(handle_t *handle, struct inode *inode) |
| 1816 | { | 1822 | { |
| 1817 | int ret; | 1823 | int ret, no_expand; |
| 1818 | 1824 | ||
| 1819 | down_write(&EXT4_I(inode)->xattr_sem); | 1825 | ext4_write_lock_xattr(inode, &no_expand); |
| 1820 | ret = ext4_destroy_inline_data_nolock(handle, inode); | 1826 | ret = ext4_destroy_inline_data_nolock(handle, inode); |
| 1821 | up_write(&EXT4_I(inode)->xattr_sem); | 1827 | ext4_write_unlock_xattr(inode, &no_expand); |
| 1822 | 1828 | ||
| 1823 | return ret; | 1829 | return ret; |
| 1824 | } | 1830 | } |
| @@ -1900,10 +1906,10 @@ out: | |||
| 1900 | return error; | 1906 | return error; |
| 1901 | } | 1907 | } |
| 1902 | 1908 | ||
| 1903 | void ext4_inline_data_truncate(struct inode *inode, int *has_inline) | 1909 | int ext4_inline_data_truncate(struct inode *inode, int *has_inline) |
| 1904 | { | 1910 | { |
| 1905 | handle_t *handle; | 1911 | handle_t *handle; |
| 1906 | int inline_size, value_len, needed_blocks; | 1912 | int inline_size, value_len, needed_blocks, no_expand, err = 0; |
| 1907 | size_t i_size; | 1913 | size_t i_size; |
| 1908 | void *value = NULL; | 1914 | void *value = NULL; |
| 1909 | struct ext4_xattr_ibody_find is = { | 1915 | struct ext4_xattr_ibody_find is = { |
| @@ -1918,19 +1924,19 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline) | |||
| 1918 | needed_blocks = ext4_writepage_trans_blocks(inode); | 1924 | needed_blocks = ext4_writepage_trans_blocks(inode); |
| 1919 | handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks); | 1925 | handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks); |
| 1920 | if (IS_ERR(handle)) | 1926 | if (IS_ERR(handle)) |
| 1921 | return; | 1927 | return PTR_ERR(handle); |
| 1922 | 1928 | ||
| 1923 | down_write(&EXT4_I(inode)->xattr_sem); | 1929 | ext4_write_lock_xattr(inode, &no_expand); |
| 1924 | if (!ext4_has_inline_data(inode)) { | 1930 | if (!ext4_has_inline_data(inode)) { |
| 1925 | *has_inline = 0; | 1931 | *has_inline = 0; |
| 1926 | ext4_journal_stop(handle); | 1932 | ext4_journal_stop(handle); |
| 1927 | return; | 1933 | return 0; |
| 1928 | } | 1934 | } |
| 1929 | 1935 | ||
| 1930 | if (ext4_orphan_add(handle, inode)) | 1936 | if ((err = ext4_orphan_add(handle, inode)) != 0) |
| 1931 | goto out; | 1937 | goto out; |
| 1932 | 1938 | ||
| 1933 | if (ext4_get_inode_loc(inode, &is.iloc)) | 1939 | if ((err = ext4_get_inode_loc(inode, &is.iloc)) != 0) |
| 1934 | goto out; | 1940 | goto out; |
| 1935 | 1941 | ||
| 1936 | down_write(&EXT4_I(inode)->i_data_sem); | 1942 | down_write(&EXT4_I(inode)->i_data_sem); |
| @@ -1941,24 +1947,29 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline) | |||
| 1941 | if (i_size < inline_size) { | 1947 | if (i_size < inline_size) { |
| 1942 | /* Clear the content in the xattr space. */ | 1948 | /* Clear the content in the xattr space. */ |
| 1943 | if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) { | 1949 | if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) { |
| 1944 | if (ext4_xattr_ibody_find(inode, &i, &is)) | 1950 | if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0) |
| 1945 | goto out_error; | 1951 | goto out_error; |
| 1946 | 1952 | ||
| 1947 | BUG_ON(is.s.not_found); | 1953 | BUG_ON(is.s.not_found); |
| 1948 | 1954 | ||
| 1949 | value_len = le32_to_cpu(is.s.here->e_value_size); | 1955 | value_len = le32_to_cpu(is.s.here->e_value_size); |
| 1950 | value = kmalloc(value_len, GFP_NOFS); | 1956 | value = kmalloc(value_len, GFP_NOFS); |
| 1951 | if (!value) | 1957 | if (!value) { |
| 1958 | err = -ENOMEM; | ||
| 1952 | goto out_error; | 1959 | goto out_error; |
| 1960 | } | ||
| 1953 | 1961 | ||
| 1954 | if (ext4_xattr_ibody_get(inode, i.name_index, i.name, | 1962 | err = ext4_xattr_ibody_get(inode, i.name_index, |
| 1955 | value, value_len)) | 1963 | i.name, value, value_len); |
| 1964 | if (err <= 0) | ||
| 1956 | goto out_error; | 1965 | goto out_error; |
| 1957 | 1966 | ||
| 1958 | i.value = value; | 1967 | i.value = value; |
| 1959 | i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ? | 1968 | i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ? |
| 1960 | i_size - EXT4_MIN_INLINE_DATA_SIZE : 0; | 1969 | i_size - EXT4_MIN_INLINE_DATA_SIZE : 0; |
| 1961 | if (ext4_xattr_ibody_inline_set(handle, inode, &i, &is)) | 1970 | err = ext4_xattr_ibody_inline_set(handle, inode, |
| 1971 | &i, &is); | ||
| 1972 | if (err) | ||
| 1962 | goto out_error; | 1973 | goto out_error; |
| 1963 | } | 1974 | } |
| 1964 | 1975 | ||
| @@ -1978,23 +1989,24 @@ out_error: | |||
| 1978 | up_write(&EXT4_I(inode)->i_data_sem); | 1989 | up_write(&EXT4_I(inode)->i_data_sem); |
| 1979 | out: | 1990 | out: |
| 1980 | brelse(is.iloc.bh); | 1991 | brelse(is.iloc.bh); |
| 1981 | up_write(&EXT4_I(inode)->xattr_sem); | 1992 | ext4_write_unlock_xattr(inode, &no_expand); |
| 1982 | kfree(value); | 1993 | kfree(value); |
| 1983 | if (inode->i_nlink) | 1994 | if (inode->i_nlink) |
| 1984 | ext4_orphan_del(handle, inode); | 1995 | ext4_orphan_del(handle, inode); |
| 1985 | 1996 | ||
| 1986 | inode->i_mtime = inode->i_ctime = current_time(inode); | 1997 | if (err == 0) { |
| 1987 | ext4_mark_inode_dirty(handle, inode); | 1998 | inode->i_mtime = inode->i_ctime = current_time(inode); |
| 1988 | if (IS_SYNC(inode)) | 1999 | err = ext4_mark_inode_dirty(handle, inode); |
| 1989 | ext4_handle_sync(handle); | 2000 | if (IS_SYNC(inode)) |
| 1990 | 2001 | ext4_handle_sync(handle); | |
| 2002 | } | ||
| 1991 | ext4_journal_stop(handle); | 2003 | ext4_journal_stop(handle); |
| 1992 | return; | 2004 | return err; |
| 1993 | } | 2005 | } |
| 1994 | 2006 | ||
| 1995 | int ext4_convert_inline_data(struct inode *inode) | 2007 | int ext4_convert_inline_data(struct inode *inode) |
| 1996 | { | 2008 | { |
| 1997 | int error, needed_blocks; | 2009 | int error, needed_blocks, no_expand; |
| 1998 | handle_t *handle; | 2010 | handle_t *handle; |
| 1999 | struct ext4_iloc iloc; | 2011 | struct ext4_iloc iloc; |
| 2000 | 2012 | ||
| @@ -2016,15 +2028,10 @@ int ext4_convert_inline_data(struct inode *inode) | |||
| 2016 | goto out_free; | 2028 | goto out_free; |
| 2017 | } | 2029 | } |
| 2018 | 2030 | ||
| 2019 | down_write(&EXT4_I(inode)->xattr_sem); | 2031 | ext4_write_lock_xattr(inode, &no_expand); |
| 2020 | if (!ext4_has_inline_data(inode)) { | 2032 | if (ext4_has_inline_data(inode)) |
| 2021 | up_write(&EXT4_I(inode)->xattr_sem); | 2033 | error = ext4_convert_inline_data_nolock(handle, inode, &iloc); |
| 2022 | goto out; | 2034 | ext4_write_unlock_xattr(inode, &no_expand); |
| 2023 | } | ||
| 2024 | |||
| 2025 | error = ext4_convert_inline_data_nolock(handle, inode, &iloc); | ||
| 2026 | up_write(&EXT4_I(inode)->xattr_sem); | ||
| 2027 | out: | ||
| 2028 | ext4_journal_stop(handle); | 2035 | ext4_journal_stop(handle); |
| 2029 | out_free: | 2036 | out_free: |
| 2030 | brelse(iloc.bh); | 2037 | brelse(iloc.bh); |
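Every down_write()/up_write() pair on xattr_sem in inline.c above now goes through ext4_write_lock_xattr()/ext4_write_unlock_xattr() and threads a saved no_expand flag. Those helpers are introduced elsewhere in the series (in fs/ext4/xattr.h, not shown here), so this is a sketch of the assumed behaviour: take the semaphore, remember whether EXT4_STATE_NO_EXPAND was already set, keep it set for the critical section, and restore the previous state on unlock so that nested callers cannot re-trigger in-inode expansion while inline data is being rewritten.

    /* Assumed shape of the helpers used by the hunks above. */
    static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
    {
        down_write(&EXT4_I(inode)->xattr_sem);
        *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
        ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
    }

    static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
    {
        if (*save == 0)
            ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
        up_write(&EXT4_I(inode)->xattr_sem);
    }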
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 88d57af1b516..f622d4a577e3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -1189,6 +1189,9 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, | |||
| 1189 | pgoff_t index; | 1189 | pgoff_t index; |
| 1190 | unsigned from, to; | 1190 | unsigned from, to; |
| 1191 | 1191 | ||
| 1192 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 1193 | return -EIO; | ||
| 1194 | |||
| 1192 | trace_ext4_write_begin(inode, pos, len, flags); | 1195 | trace_ext4_write_begin(inode, pos, len, flags); |
| 1193 | /* | 1196 | /* |
| 1194 | * Reserve one block more for addition to orphan list in case | 1197 | * Reserve one block more for addition to orphan list in case |
| @@ -1330,8 +1333,11 @@ static int ext4_write_end(struct file *file, | |||
| 1330 | if (ext4_has_inline_data(inode)) { | 1333 | if (ext4_has_inline_data(inode)) { |
| 1331 | ret = ext4_write_inline_data_end(inode, pos, len, | 1334 | ret = ext4_write_inline_data_end(inode, pos, len, |
| 1332 | copied, page); | 1335 | copied, page); |
| 1333 | if (ret < 0) | 1336 | if (ret < 0) { |
| 1337 | unlock_page(page); | ||
| 1338 | put_page(page); | ||
| 1334 | goto errout; | 1339 | goto errout; |
| 1340 | } | ||
| 1335 | copied = ret; | 1341 | copied = ret; |
| 1336 | } else | 1342 | } else |
| 1337 | copied = block_write_end(file, mapping, pos, | 1343 | copied = block_write_end(file, mapping, pos, |
| @@ -1385,7 +1391,9 @@ errout: | |||
| 1385 | * set the buffer to be dirty, since in data=journalled mode we need | 1391 | * set the buffer to be dirty, since in data=journalled mode we need |
| 1386 | * to call ext4_handle_dirty_metadata() instead. | 1392 | * to call ext4_handle_dirty_metadata() instead. |
| 1387 | */ | 1393 | */ |
| 1388 | static void zero_new_buffers(struct page *page, unsigned from, unsigned to) | 1394 | static void ext4_journalled_zero_new_buffers(handle_t *handle, |
| 1395 | struct page *page, | ||
| 1396 | unsigned from, unsigned to) | ||
| 1389 | { | 1397 | { |
| 1390 | unsigned int block_start = 0, block_end; | 1398 | unsigned int block_start = 0, block_end; |
| 1391 | struct buffer_head *head, *bh; | 1399 | struct buffer_head *head, *bh; |
| @@ -1402,7 +1410,7 @@ static void zero_new_buffers(struct page *page, unsigned from, unsigned to) | |||
| 1402 | size = min(to, block_end) - start; | 1410 | size = min(to, block_end) - start; |
| 1403 | 1411 | ||
| 1404 | zero_user(page, start, size); | 1412 | zero_user(page, start, size); |
| 1405 | set_buffer_uptodate(bh); | 1413 | write_end_fn(handle, bh); |
| 1406 | } | 1414 | } |
| 1407 | clear_buffer_new(bh); | 1415 | clear_buffer_new(bh); |
| 1408 | } | 1416 | } |
| @@ -1431,18 +1439,25 @@ static int ext4_journalled_write_end(struct file *file, | |||
| 1431 | 1439 | ||
| 1432 | BUG_ON(!ext4_handle_valid(handle)); | 1440 | BUG_ON(!ext4_handle_valid(handle)); |
| 1433 | 1441 | ||
| 1434 | if (ext4_has_inline_data(inode)) | 1442 | if (ext4_has_inline_data(inode)) { |
| 1435 | copied = ext4_write_inline_data_end(inode, pos, len, | 1443 | ret = ext4_write_inline_data_end(inode, pos, len, |
| 1436 | copied, page); | 1444 | copied, page); |
| 1437 | else { | 1445 | if (ret < 0) { |
| 1438 | if (copied < len) { | 1446 | unlock_page(page); |
| 1439 | if (!PageUptodate(page)) | 1447 | put_page(page); |
| 1440 | copied = 0; | 1448 | goto errout; |
| 1441 | zero_new_buffers(page, from+copied, to); | ||
| 1442 | } | 1449 | } |
| 1443 | 1450 | copied = ret; | |
| 1451 | } else if (unlikely(copied < len) && !PageUptodate(page)) { | ||
| 1452 | copied = 0; | ||
| 1453 | ext4_journalled_zero_new_buffers(handle, page, from, to); | ||
| 1454 | } else { | ||
| 1455 | if (unlikely(copied < len)) | ||
| 1456 | ext4_journalled_zero_new_buffers(handle, page, | ||
| 1457 | from + copied, to); | ||
| 1444 | ret = ext4_walk_page_buffers(handle, page_buffers(page), from, | 1458 | ret = ext4_walk_page_buffers(handle, page_buffers(page), from, |
| 1445 | to, &partial, write_end_fn); | 1459 | from + copied, &partial, |
| 1460 | write_end_fn); | ||
| 1446 | if (!partial) | 1461 | if (!partial) |
| 1447 | SetPageUptodate(page); | 1462 | SetPageUptodate(page); |
| 1448 | } | 1463 | } |
| @@ -1468,6 +1483,7 @@ static int ext4_journalled_write_end(struct file *file, | |||
| 1468 | */ | 1483 | */ |
| 1469 | ext4_orphan_add(handle, inode); | 1484 | ext4_orphan_add(handle, inode); |
| 1470 | 1485 | ||
| 1486 | errout: | ||
| 1471 | ret2 = ext4_journal_stop(handle); | 1487 | ret2 = ext4_journal_stop(handle); |
| 1472 | if (!ret) | 1488 | if (!ret) |
| 1473 | ret = ret2; | 1489 | ret = ret2; |
| @@ -2034,6 +2050,12 @@ static int ext4_writepage(struct page *page, | |||
| 2034 | struct ext4_io_submit io_submit; | 2050 | struct ext4_io_submit io_submit; |
| 2035 | bool keep_towrite = false; | 2051 | bool keep_towrite = false; |
| 2036 | 2052 | ||
| 2053 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) { | ||
| 2054 | ext4_invalidatepage(page, 0, PAGE_SIZE); | ||
| 2055 | unlock_page(page); | ||
| 2056 | return -EIO; | ||
| 2057 | } | ||
| 2058 | |||
| 2037 | trace_ext4_writepage(page); | 2059 | trace_ext4_writepage(page); |
| 2038 | size = i_size_read(inode); | 2060 | size = i_size_read(inode); |
| 2039 | if (page->index == size >> PAGE_SHIFT) | 2061 | if (page->index == size >> PAGE_SHIFT) |
| @@ -2409,7 +2431,8 @@ static int mpage_map_and_submit_extent(handle_t *handle, | |||
| 2409 | if (err < 0) { | 2431 | if (err < 0) { |
| 2410 | struct super_block *sb = inode->i_sb; | 2432 | struct super_block *sb = inode->i_sb; |
| 2411 | 2433 | ||
| 2412 | if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) | 2434 | if (ext4_forced_shutdown(EXT4_SB(sb)) || |
| 2435 | EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) | ||
| 2413 | goto invalidate_dirty_pages; | 2436 | goto invalidate_dirty_pages; |
| 2414 | /* | 2437 | /* |
| 2415 | * Let the upper layers retry transient errors. | 2438 | * Let the upper layers retry transient errors. |
| @@ -2464,8 +2487,8 @@ update_disksize: | |||
| 2464 | disksize = i_size; | 2487 | disksize = i_size; |
| 2465 | if (disksize > EXT4_I(inode)->i_disksize) | 2488 | if (disksize > EXT4_I(inode)->i_disksize) |
| 2466 | EXT4_I(inode)->i_disksize = disksize; | 2489 | EXT4_I(inode)->i_disksize = disksize; |
| 2467 | err2 = ext4_mark_inode_dirty(handle, inode); | ||
| 2468 | up_write(&EXT4_I(inode)->i_data_sem); | 2490 | up_write(&EXT4_I(inode)->i_data_sem); |
| 2491 | err2 = ext4_mark_inode_dirty(handle, inode); | ||
| 2469 | if (err2) | 2492 | if (err2) |
| 2470 | ext4_error(inode->i_sb, | 2493 | ext4_error(inode->i_sb, |
| 2471 | "Failed to mark inode %lu dirty", | 2494 | "Failed to mark inode %lu dirty", |
| @@ -2631,6 +2654,9 @@ static int ext4_writepages(struct address_space *mapping, | |||
| 2631 | struct blk_plug plug; | 2654 | struct blk_plug plug; |
| 2632 | bool give_up_on_write = false; | 2655 | bool give_up_on_write = false; |
| 2633 | 2656 | ||
| 2657 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 2658 | return -EIO; | ||
| 2659 | |||
| 2634 | percpu_down_read(&sbi->s_journal_flag_rwsem); | 2660 | percpu_down_read(&sbi->s_journal_flag_rwsem); |
| 2635 | trace_ext4_writepages(inode, wbc); | 2661 | trace_ext4_writepages(inode, wbc); |
| 2636 | 2662 | ||
| @@ -2667,7 +2693,8 @@ static int ext4_writepages(struct address_space *mapping, | |||
| 2667 | * *never* be called, so if that ever happens, we would want | 2693 | * *never* be called, so if that ever happens, we would want |
| 2668 | * the stack trace. | 2694 | * the stack trace. |
| 2669 | */ | 2695 | */ |
| 2670 | if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { | 2696 | if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || |
| 2697 | sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { | ||
| 2671 | ret = -EROFS; | 2698 | ret = -EROFS; |
| 2672 | goto out_writepages; | 2699 | goto out_writepages; |
| 2673 | } | 2700 | } |
| @@ -2892,6 +2919,9 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, | |||
| 2892 | struct inode *inode = mapping->host; | 2919 | struct inode *inode = mapping->host; |
| 2893 | handle_t *handle; | 2920 | handle_t *handle; |
| 2894 | 2921 | ||
| 2922 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 2923 | return -EIO; | ||
| 2924 | |||
| 2895 | index = pos >> PAGE_SHIFT; | 2925 | index = pos >> PAGE_SHIFT; |
| 2896 | 2926 | ||
| 2897 | if (ext4_nonda_switch(inode->i_sb) || | 2927 | if (ext4_nonda_switch(inode->i_sb) || |
| @@ -3914,6 +3944,10 @@ static int ext4_block_truncate_page(handle_t *handle, | |||
| 3914 | unsigned blocksize; | 3944 | unsigned blocksize; |
| 3915 | struct inode *inode = mapping->host; | 3945 | struct inode *inode = mapping->host; |
| 3916 | 3946 | ||
| 3947 | /* If we are processing an encrypted inode during orphan list handling */ | ||
| 3948 | if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode)) | ||
| 3949 | return 0; | ||
| 3950 | |||
| 3917 | blocksize = inode->i_sb->s_blocksize; | 3951 | blocksize = inode->i_sb->s_blocksize; |
| 3918 | length = blocksize - (offset & (blocksize - 1)); | 3952 | length = blocksize - (offset & (blocksize - 1)); |
| 3919 | 3953 | ||
| @@ -4222,7 +4256,9 @@ int ext4_truncate(struct inode *inode) | |||
| 4222 | if (ext4_has_inline_data(inode)) { | 4256 | if (ext4_has_inline_data(inode)) { |
| 4223 | int has_inline = 1; | 4257 | int has_inline = 1; |
| 4224 | 4258 | ||
| 4225 | ext4_inline_data_truncate(inode, &has_inline); | 4259 | err = ext4_inline_data_truncate(inode, &has_inline); |
| 4260 | if (err) | ||
| 4261 | return err; | ||
| 4226 | if (has_inline) | 4262 | if (has_inline) |
| 4227 | return 0; | 4263 | return 0; |
| 4228 | } | 4264 | } |
| @@ -5197,6 +5233,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 5197 | int orphan = 0; | 5233 | int orphan = 0; |
| 5198 | const unsigned int ia_valid = attr->ia_valid; | 5234 | const unsigned int ia_valid = attr->ia_valid; |
| 5199 | 5235 | ||
| 5236 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 5237 | return -EIO; | ||
| 5238 | |||
| 5200 | error = setattr_prepare(dentry, attr); | 5239 | error = setattr_prepare(dentry, attr); |
| 5201 | if (error) | 5240 | if (error) |
| 5202 | return error; | 5241 | return error; |
| @@ -5483,6 +5522,9 @@ int ext4_mark_iloc_dirty(handle_t *handle, | |||
| 5483 | { | 5522 | { |
| 5484 | int err = 0; | 5523 | int err = 0; |
| 5485 | 5524 | ||
| 5525 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 5526 | return -EIO; | ||
| 5527 | |||
| 5486 | if (IS_I_VERSION(inode)) | 5528 | if (IS_I_VERSION(inode)) |
| 5487 | inode_inc_iversion(inode); | 5529 | inode_inc_iversion(inode); |
| 5488 | 5530 | ||
| @@ -5506,6 +5548,9 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode, | |||
| 5506 | { | 5548 | { |
| 5507 | int err; | 5549 | int err; |
| 5508 | 5550 | ||
| 5551 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 5552 | return -EIO; | ||
| 5553 | |||
| 5509 | err = ext4_get_inode_loc(inode, iloc); | 5554 | err = ext4_get_inode_loc(inode, iloc); |
| 5510 | if (!err) { | 5555 | if (!err) { |
| 5511 | BUFFER_TRACE(iloc->bh, "get_write_access"); | 5556 | BUFFER_TRACE(iloc->bh, "get_write_access"); |
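In the ext4_journalled_write_end() hunks above, the renamed ext4_journalled_zero_new_buffers() now pushes each zeroed buffer through write_end_fn() instead of merely marking it uptodate, so that in data=journal mode the zeroed range is also dirtied via the journal handle. write_end_fn() is defined earlier in fs/ext4/inode.c and is not part of any hunk here; its per-buffer work is presumably along these lines (a sketch, not quoted from the patch).

    static int write_end_fn(handle_t *handle, struct buffer_head *bh)
    {
        int ret;

        if (!buffer_mapped(bh) || buffer_freed(bh))
            return 0;
        set_buffer_uptodate(bh);
        ret = ext4_handle_dirty_metadata(handle, NULL, bh);
        clear_buffer_new(bh);
        return ret;
    }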
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index d534399cf607..a4273ddb9922 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/quotaops.h> | 16 | #include <linux/quotaops.h> |
| 17 | #include <linux/uuid.h> | 17 | #include <linux/uuid.h> |
| 18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
| 19 | #include <linux/delay.h> | ||
| 19 | #include "ext4_jbd2.h" | 20 | #include "ext4_jbd2.h" |
| 20 | #include "ext4.h" | 21 | #include "ext4.h" |
| 21 | 22 | ||
| @@ -442,6 +443,52 @@ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags) | |||
| 442 | return iflags; | 443 | return iflags; |
| 443 | } | 444 | } |
| 444 | 445 | ||
| 446 | int ext4_shutdown(struct super_block *sb, unsigned long arg) | ||
| 447 | { | ||
| 448 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
| 449 | __u32 flags; | ||
| 450 | |||
| 451 | if (!capable(CAP_SYS_ADMIN)) | ||
| 452 | return -EPERM; | ||
| 453 | |||
| 454 | if (get_user(flags, (__u32 __user *)arg)) | ||
| 455 | return -EFAULT; | ||
| 456 | |||
| 457 | if (flags > EXT4_GOING_FLAGS_NOLOGFLUSH) | ||
| 458 | return -EINVAL; | ||
| 459 | |||
| 460 | if (ext4_forced_shutdown(sbi)) | ||
| 461 | return 0; | ||
| 462 | |||
| 463 | ext4_msg(sb, KERN_ALERT, "shut down requested (%d)", flags); | ||
| 464 | |||
| 465 | switch (flags) { | ||
| 466 | case EXT4_GOING_FLAGS_DEFAULT: | ||
| 467 | freeze_bdev(sb->s_bdev); | ||
| 468 | set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags); | ||
| 469 | thaw_bdev(sb->s_bdev, sb); | ||
| 470 | break; | ||
| 471 | case EXT4_GOING_FLAGS_LOGFLUSH: | ||
| 472 | set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags); | ||
| 473 | if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) { | ||
| 474 | (void) ext4_force_commit(sb); | ||
| 475 | jbd2_journal_abort(sbi->s_journal, 0); | ||
| 476 | } | ||
| 477 | break; | ||
| 478 | case EXT4_GOING_FLAGS_NOLOGFLUSH: | ||
| 479 | set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags); | ||
| 480 | if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) { | ||
| 481 | msleep(100); | ||
| 482 | jbd2_journal_abort(sbi->s_journal, 0); | ||
| 483 | } | ||
| 484 | break; | ||
| 485 | default: | ||
| 486 | return -EINVAL; | ||
| 487 | } | ||
| 488 | clear_opt(sb, DISCARD); | ||
| 489 | return 0; | ||
| 490 | } | ||
| 491 | |||
| 445 | long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 492 | long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 446 | { | 493 | { |
| 447 | struct inode *inode = file_inode(filp); | 494 | struct inode *inode = file_inode(filp); |
| @@ -893,6 +940,8 @@ resizefs_out: | |||
| 893 | 940 | ||
| 894 | return 0; | 941 | return 0; |
| 895 | } | 942 | } |
| 943 | case EXT4_IOC_SHUTDOWN: | ||
| 944 | return ext4_shutdown(sb, arg); | ||
| 896 | default: | 945 | default: |
| 897 | return -ENOTTY; | 946 | return -ENOTTY; |
| 898 | } | 947 | } |
| @@ -959,6 +1008,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 959 | case EXT4_IOC_SET_ENCRYPTION_POLICY: | 1008 | case EXT4_IOC_SET_ENCRYPTION_POLICY: |
| 960 | case EXT4_IOC_GET_ENCRYPTION_PWSALT: | 1009 | case EXT4_IOC_GET_ENCRYPTION_PWSALT: |
| 961 | case EXT4_IOC_GET_ENCRYPTION_POLICY: | 1010 | case EXT4_IOC_GET_ENCRYPTION_POLICY: |
| 1011 | case EXT4_IOC_SHUTDOWN: | ||
| 962 | break; | 1012 | break; |
| 963 | default: | 1013 | default: |
| 964 | return -ENOIOCTLCMD; | 1014 | return -ENOIOCTLCMD; |
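The new EXT4_IOC_SHUTDOWN path is meant to be driven from userspace, xfstests-style, to exercise the forced-shutdown checks added throughout this patch. A hypothetical test invocation could look like the sketch below; the ioctl encoding and the EXT4_GOING_FLAGS_* values are assumptions (the flag values are only constrained by the range check in ext4_shutdown() above), and the real definitions should be taken from the kernel's ext4 headers.

    /* Hypothetical userspace sketch; not part of the patch. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/ioctl.h>
    #include <linux/types.h>

    #ifndef EXT4_IOC_SHUTDOWN
    #define EXT4_IOC_SHUTDOWN          _IOR('X', 125, __u32)  /* assumed encoding */
    #endif
    #define EXT4_GOING_FLAGS_DEFAULT    0x0  /* assumed: flush, freeze/thaw bdev */
    #define EXT4_GOING_FLAGS_LOGFLUSH   0x1  /* assumed: force commit, then abort journal */
    #define EXT4_GOING_FLAGS_NOLOGFLUSH 0x2  /* assumed: abort journal without flushing */

    int main(int argc, char **argv)
    {
        __u32 flags = EXT4_GOING_FLAGS_DEFAULT;
        int fd;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <file on the target ext4 fs>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || ioctl(fd, EXT4_IOC_SHUTDOWN, &flags) < 0) {
            perror("EXT4_IOC_SHUTDOWN");
            return 1;
        }
        close(fd);
        return 0;
    }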
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 7ae43c59bc79..10c62de642c6 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -1556,7 +1556,17 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block, | |||
| 1556 | ex->fe_len += 1 << order; | 1556 | ex->fe_len += 1 << order; |
| 1557 | } | 1557 | } |
| 1558 | 1558 | ||
| 1559 | BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))); | 1559 | if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) { |
| 1560 | /* Should never happen! (but apparently sometimes does?!?) */ | ||
| 1561 | WARN_ON(1); | ||
| 1562 | ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent " | ||
| 1563 | "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", | ||
| 1564 | block, order, needed, ex->fe_group, ex->fe_start, | ||
| 1565 | ex->fe_len, ex->fe_logical); | ||
| 1566 | ex->fe_len = 0; | ||
| 1567 | ex->fe_start = 0; | ||
| 1568 | ex->fe_group = 0; | ||
| 1569 | } | ||
| 1560 | return ex->fe_len; | 1570 | return ex->fe_len; |
| 1561 | } | 1571 | } |
| 1562 | 1572 | ||
| @@ -2136,8 +2146,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) | |||
| 2136 | * We search using buddy data only if the order of the request | 2146 | * We search using buddy data only if the order of the request |
| 2137 | * is greater than or equal to the sbi_s_mb_order2_reqs | 2147 | * is greater than or equal to the sbi_s_mb_order2_reqs |
| 2138 | * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req | 2148 | * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req |
| 2149 | * We also support searching for power-of-two requests only for | ||
| 2150 | * requests up to the maximum buddy size we have constructed. | ||
| 2139 | */ | 2151 | */ |
| 2140 | if (i >= sbi->s_mb_order2_reqs) { | 2152 | if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) { |
| 2141 | /* | 2153 | /* |
| 2142 | * This should tell if fe_len is exactly power of 2 | 2154 | * This should tell if fe_len is exactly power of 2 |
| 2143 | */ | 2155 | */ |
| @@ -2207,7 +2219,7 @@ repeat: | |||
| 2207 | } | 2219 | } |
| 2208 | 2220 | ||
| 2209 | ac->ac_groups_scanned++; | 2221 | ac->ac_groups_scanned++; |
| 2210 | if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2) | 2222 | if (cr == 0) |
| 2211 | ext4_mb_simple_scan_group(ac, &e4b); | 2223 | ext4_mb_simple_scan_group(ac, &e4b); |
| 2212 | else if (cr == 1 && sbi->s_stripe && | 2224 | else if (cr == 1 && sbi->s_stripe && |
| 2213 | !(ac->ac_g_ex.fe_len % sbi->s_stripe)) | 2225 | !(ac->ac_g_ex.fe_len % sbi->s_stripe)) |
| @@ -3123,6 +3135,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, | |||
| 3123 | if (ar->pright && start + size - 1 >= ar->lright) | 3135 | if (ar->pright && start + size - 1 >= ar->lright) |
| 3124 | size -= start + size - ar->lright; | 3136 | size -= start + size - ar->lright; |
| 3125 | 3137 | ||
| 3138 | /* | ||
| 3139 | * Trim allocation request for filesystems with artificially small | ||
| 3140 | * groups. | ||
| 3141 | */ | ||
| 3142 | if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) | ||
| 3143 | size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); | ||
| 3144 | |||
| 3126 | end = start + size; | 3145 | end = start + size; |
| 3127 | 3146 | ||
| 3128 | /* check we don't cross already preallocated blocks */ | 3147 | /* check we don't cross already preallocated blocks */ |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index eadba919f26b..6ad612c576fc 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
| @@ -1378,6 +1378,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, | |||
| 1378 | return NULL; | 1378 | return NULL; |
| 1379 | 1379 | ||
| 1380 | retval = ext4_fname_setup_filename(dir, d_name, 1, &fname); | 1380 | retval = ext4_fname_setup_filename(dir, d_name, 1, &fname); |
| 1381 | if (retval == -ENOENT) | ||
| 1382 | return NULL; | ||
| 1381 | if (retval) | 1383 | if (retval) |
| 1382 | return ERR_PTR(retval); | 1384 | return ERR_PTR(retval); |
| 1383 | 1385 | ||
| @@ -1616,13 +1618,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi | |||
| 1616 | !fscrypt_has_permitted_context(dir, inode)) { | 1618 | !fscrypt_has_permitted_context(dir, inode)) { |
| 1617 | int nokey = ext4_encrypted_inode(inode) && | 1619 | int nokey = ext4_encrypted_inode(inode) && |
| 1618 | !fscrypt_has_encryption_key(inode); | 1620 | !fscrypt_has_encryption_key(inode); |
| 1619 | iput(inode); | 1621 | if (nokey) { |
| 1620 | if (nokey) | 1622 | iput(inode); |
| 1621 | return ERR_PTR(-ENOKEY); | 1623 | return ERR_PTR(-ENOKEY); |
| 1624 | } | ||
| 1622 | ext4_warning(inode->i_sb, | 1625 | ext4_warning(inode->i_sb, |
| 1623 | "Inconsistent encryption contexts: %lu/%lu", | 1626 | "Inconsistent encryption contexts: %lu/%lu", |
| 1624 | (unsigned long) dir->i_ino, | 1627 | (unsigned long) dir->i_ino, |
| 1625 | (unsigned long) inode->i_ino); | 1628 | (unsigned long) inode->i_ino); |
| 1629 | iput(inode); | ||
| 1626 | return ERR_PTR(-EPERM); | 1630 | return ERR_PTR(-EPERM); |
| 1627 | } | 1631 | } |
| 1628 | } | 1632 | } |
| @@ -2935,6 +2939,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) | |||
| 2935 | struct ext4_dir_entry_2 *de; | 2939 | struct ext4_dir_entry_2 *de; |
| 2936 | handle_t *handle = NULL; | 2940 | handle_t *handle = NULL; |
| 2937 | 2941 | ||
| 2942 | if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) | ||
| 2943 | return -EIO; | ||
| 2944 | |||
| 2938 | /* Initialize quotas before so that eventual writes go in | 2945 | /* Initialize quotas before so that eventual writes go in |
| 2939 | * separate transaction */ | 2946 | * separate transaction */ |
| 2940 | retval = dquot_initialize(dir); | 2947 | retval = dquot_initialize(dir); |
| @@ -3008,6 +3015,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) | |||
| 3008 | struct ext4_dir_entry_2 *de; | 3015 | struct ext4_dir_entry_2 *de; |
| 3009 | handle_t *handle = NULL; | 3016 | handle_t *handle = NULL; |
| 3010 | 3017 | ||
| 3018 | if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) | ||
| 3019 | return -EIO; | ||
| 3020 | |||
| 3011 | trace_ext4_unlink_enter(dir, dentry); | 3021 | trace_ext4_unlink_enter(dir, dentry); |
| 3012 | /* Initialize quotas before so that eventual writes go | 3022 | /* Initialize quotas before so that eventual writes go |
| 3013 | * in separate transaction */ | 3023 | * in separate transaction */ |
| @@ -3078,6 +3088,9 @@ static int ext4_symlink(struct inode *dir, | |||
| 3078 | struct fscrypt_str disk_link; | 3088 | struct fscrypt_str disk_link; |
| 3079 | struct fscrypt_symlink_data *sd = NULL; | 3089 | struct fscrypt_symlink_data *sd = NULL; |
| 3080 | 3090 | ||
| 3091 | if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) | ||
| 3092 | return -EIO; | ||
| 3093 | |||
| 3081 | disk_link.len = len + 1; | 3094 | disk_link.len = len + 1; |
| 3082 | disk_link.name = (char *) symname; | 3095 | disk_link.name = (char *) symname; |
| 3083 | 3096 | ||
| @@ -3088,7 +3101,7 @@ static int ext4_symlink(struct inode *dir, | |||
| 3088 | if (err) | 3101 | if (err) |
| 3089 | return err; | 3102 | return err; |
| 3090 | if (!fscrypt_has_encryption_key(dir)) | 3103 | if (!fscrypt_has_encryption_key(dir)) |
| 3091 | return -EPERM; | 3104 | return -ENOKEY; |
| 3092 | disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + | 3105 | disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + |
| 3093 | sizeof(struct fscrypt_symlink_data)); | 3106 | sizeof(struct fscrypt_symlink_data)); |
| 3094 | sd = kzalloc(disk_link.len, GFP_KERNEL); | 3107 | sd = kzalloc(disk_link.len, GFP_KERNEL); |
| @@ -3525,6 +3538,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 3525 | EXT4_I(old_dentry->d_inode)->i_projid))) | 3538 | EXT4_I(old_dentry->d_inode)->i_projid))) |
| 3526 | return -EXDEV; | 3539 | return -EXDEV; |
| 3527 | 3540 | ||
| 3541 | if ((ext4_encrypted_inode(old_dir) && | ||
| 3542 | !fscrypt_has_encryption_key(old_dir)) || | ||
| 3543 | (ext4_encrypted_inode(new_dir) && | ||
| 3544 | !fscrypt_has_encryption_key(new_dir))) | ||
| 3545 | return -ENOKEY; | ||
| 3546 | |||
| 3528 | retval = dquot_initialize(old.dir); | 3547 | retval = dquot_initialize(old.dir); |
| 3529 | if (retval) | 3548 | if (retval) |
| 3530 | return retval; | 3549 | return retval; |
| @@ -3725,6 +3744,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 3725 | int retval; | 3744 | int retval; |
| 3726 | struct timespec ctime; | 3745 | struct timespec ctime; |
| 3727 | 3746 | ||
| 3747 | if ((ext4_encrypted_inode(old_dir) && | ||
| 3748 | !fscrypt_has_encryption_key(old_dir)) || | ||
| 3749 | (ext4_encrypted_inode(new_dir) && | ||
| 3750 | !fscrypt_has_encryption_key(new_dir))) | ||
| 3751 | return -ENOKEY; | ||
| 3752 | |||
| 3728 | if ((ext4_encrypted_inode(old_dir) || | 3753 | if ((ext4_encrypted_inode(old_dir) || |
| 3729 | ext4_encrypted_inode(new_dir)) && | 3754 | ext4_encrypted_inode(new_dir)) && |
| 3730 | (old_dir != new_dir) && | 3755 | (old_dir != new_dir) && |
| @@ -3858,6 +3883,9 @@ static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry, | |||
| 3858 | struct inode *new_dir, struct dentry *new_dentry, | 3883 | struct inode *new_dir, struct dentry *new_dentry, |
| 3859 | unsigned int flags) | 3884 | unsigned int flags) |
| 3860 | { | 3885 | { |
| 3886 | if (unlikely(ext4_forced_shutdown(EXT4_SB(old_dir->i_sb)))) | ||
| 3887 | return -EIO; | ||
| 3888 | |||
| 3861 | if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) | 3889 | if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) |
| 3862 | return -EINVAL; | 3890 | return -EINVAL; |
| 3863 | 3891 | ||
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index d83b0f3c5fe9..208241b06662 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
| 26 | #include <linux/backing-dev.h> | 26 | #include <linux/backing-dev.h> |
| 27 | #include <linux/fscrypto.h> | ||
| 28 | 27 | ||
| 29 | #include "ext4_jbd2.h" | 28 | #include "ext4_jbd2.h" |
| 30 | #include "xattr.h" | 29 | #include "xattr.h" |
| @@ -158,7 +157,7 @@ static int ext4_end_io(ext4_io_end_t *io) | |||
| 158 | 157 | ||
| 159 | io->handle = NULL; /* Following call will use up the handle */ | 158 | io->handle = NULL; /* Following call will use up the handle */ |
| 160 | ret = ext4_convert_unwritten_extents(handle, inode, offset, size); | 159 | ret = ext4_convert_unwritten_extents(handle, inode, offset, size); |
| 161 | if (ret < 0) { | 160 | if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) { |
| 162 | ext4_msg(inode->i_sb, KERN_EMERG, | 161 | ext4_msg(inode->i_sb, KERN_EMERG, |
| 163 | "failed to convert unwritten extents to written " | 162 | "failed to convert unwritten extents to written " |
| 164 | "extents -- potential data loss! " | 163 | "extents -- potential data loss! " |
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index cf681004b196..c3ed9021b781 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
| @@ -45,7 +45,8 @@ int ext4_resize_begin(struct super_block *sb) | |||
| 45 | return -EPERM; | 45 | return -EPERM; |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags)) | 48 | if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, |
| 49 | &EXT4_SB(sb)->s_ext4_flags)) | ||
| 49 | ret = -EBUSY; | 50 | ret = -EBUSY; |
| 50 | 51 | ||
| 51 | return ret; | 52 | return ret; |
| @@ -53,7 +54,7 @@ int ext4_resize_begin(struct super_block *sb) | |||
| 53 | 54 | ||
| 54 | void ext4_resize_end(struct super_block *sb) | 55 | void ext4_resize_end(struct super_block *sb) |
| 55 | { | 56 | { |
| 56 | clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags); | 57 | clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags); |
| 57 | smp_mb__after_atomic(); | 58 | smp_mb__after_atomic(); |
| 58 | } | 59 | } |
| 59 | 60 | ||
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 66845a08a87a..2e03a0a88d92 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -438,6 +438,9 @@ void __ext4_error(struct super_block *sb, const char *function, | |||
| 438 | struct va_format vaf; | 438 | struct va_format vaf; |
| 439 | va_list args; | 439 | va_list args; |
| 440 | 440 | ||
| 441 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) | ||
| 442 | return; | ||
| 443 | |||
| 441 | if (ext4_error_ratelimit(sb)) { | 444 | if (ext4_error_ratelimit(sb)) { |
| 442 | va_start(args, fmt); | 445 | va_start(args, fmt); |
| 443 | vaf.fmt = fmt; | 446 | vaf.fmt = fmt; |
| @@ -459,6 +462,9 @@ void __ext4_error_inode(struct inode *inode, const char *function, | |||
| 459 | struct va_format vaf; | 462 | struct va_format vaf; |
| 460 | struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; | 463 | struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; |
| 461 | 464 | ||
| 465 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 466 | return; | ||
| 467 | |||
| 462 | es->s_last_error_ino = cpu_to_le32(inode->i_ino); | 468 | es->s_last_error_ino = cpu_to_le32(inode->i_ino); |
| 463 | es->s_last_error_block = cpu_to_le64(block); | 469 | es->s_last_error_block = cpu_to_le64(block); |
| 464 | if (ext4_error_ratelimit(inode->i_sb)) { | 470 | if (ext4_error_ratelimit(inode->i_sb)) { |
| @@ -491,6 +497,9 @@ void __ext4_error_file(struct file *file, const char *function, | |||
| 491 | struct inode *inode = file_inode(file); | 497 | struct inode *inode = file_inode(file); |
| 492 | char pathname[80], *path; | 498 | char pathname[80], *path; |
| 493 | 499 | ||
| 500 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 501 | return; | ||
| 502 | |||
| 494 | es = EXT4_SB(inode->i_sb)->s_es; | 503 | es = EXT4_SB(inode->i_sb)->s_es; |
| 495 | es->s_last_error_ino = cpu_to_le32(inode->i_ino); | 504 | es->s_last_error_ino = cpu_to_le32(inode->i_ino); |
| 496 | if (ext4_error_ratelimit(inode->i_sb)) { | 505 | if (ext4_error_ratelimit(inode->i_sb)) { |
| @@ -567,6 +576,9 @@ void __ext4_std_error(struct super_block *sb, const char *function, | |||
| 567 | char nbuf[16]; | 576 | char nbuf[16]; |
| 568 | const char *errstr; | 577 | const char *errstr; |
| 569 | 578 | ||
| 579 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) | ||
| 580 | return; | ||
| 581 | |||
| 570 | /* Special case: if the error is EROFS, and we're not already | 582 | /* Special case: if the error is EROFS, and we're not already |
| 571 | * inside a transaction, then there's really no point in logging | 583 | * inside a transaction, then there's really no point in logging |
| 572 | * an error. */ | 584 | * an error. */ |
| @@ -600,6 +612,9 @@ void __ext4_abort(struct super_block *sb, const char *function, | |||
| 600 | struct va_format vaf; | 612 | struct va_format vaf; |
| 601 | va_list args; | 613 | va_list args; |
| 602 | 614 | ||
| 615 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) | ||
| 616 | return; | ||
| 617 | |||
| 603 | save_error_info(sb, function, line); | 618 | save_error_info(sb, function, line); |
| 604 | va_start(args, fmt); | 619 | va_start(args, fmt); |
| 605 | vaf.fmt = fmt; | 620 | vaf.fmt = fmt; |
| @@ -695,6 +710,9 @@ __acquires(bitlock) | |||
| 695 | va_list args; | 710 | va_list args; |
| 696 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; | 711 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; |
| 697 | 712 | ||
| 713 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) | ||
| 714 | return; | ||
| 715 | |||
| 698 | es->s_last_error_ino = cpu_to_le32(ino); | 716 | es->s_last_error_ino = cpu_to_le32(ino); |
| 699 | es->s_last_error_block = cpu_to_le64(block); | 717 | es->s_last_error_block = cpu_to_le64(block); |
| 700 | __save_error_info(sb, function, line); | 718 | __save_error_info(sb, function, line); |
| @@ -825,6 +843,7 @@ static void ext4_put_super(struct super_block *sb) | |||
| 825 | { | 843 | { |
| 826 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 844 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
| 827 | struct ext4_super_block *es = sbi->s_es; | 845 | struct ext4_super_block *es = sbi->s_es; |
| 846 | int aborted = 0; | ||
| 828 | int i, err; | 847 | int i, err; |
| 829 | 848 | ||
| 830 | ext4_unregister_li_request(sb); | 849 | ext4_unregister_li_request(sb); |
| @@ -834,9 +853,10 @@ static void ext4_put_super(struct super_block *sb) | |||
| 834 | destroy_workqueue(sbi->rsv_conversion_wq); | 853 | destroy_workqueue(sbi->rsv_conversion_wq); |
| 835 | 854 | ||
| 836 | if (sbi->s_journal) { | 855 | if (sbi->s_journal) { |
| 856 | aborted = is_journal_aborted(sbi->s_journal); | ||
| 837 | err = jbd2_journal_destroy(sbi->s_journal); | 857 | err = jbd2_journal_destroy(sbi->s_journal); |
| 838 | sbi->s_journal = NULL; | 858 | sbi->s_journal = NULL; |
| 839 | if (err < 0) | 859 | if ((err < 0) && !aborted) |
| 840 | ext4_abort(sb, "Couldn't clean up the journal"); | 860 | ext4_abort(sb, "Couldn't clean up the journal"); |
| 841 | } | 861 | } |
| 842 | 862 | ||
| @@ -847,7 +867,7 @@ static void ext4_put_super(struct super_block *sb) | |||
| 847 | ext4_mb_release(sb); | 867 | ext4_mb_release(sb); |
| 848 | ext4_ext_release(sb); | 868 | ext4_ext_release(sb); |
| 849 | 869 | ||
| 850 | if (!(sb->s_flags & MS_RDONLY)) { | 870 | if (!(sb->s_flags & MS_RDONLY) && !aborted) { |
| 851 | ext4_clear_feature_journal_needs_recovery(sb); | 871 | ext4_clear_feature_journal_needs_recovery(sb); |
| 852 | es->s_state = cpu_to_le16(sbi->s_mount_state); | 872 | es->s_state = cpu_to_le16(sbi->s_mount_state); |
| 853 | } | 873 | } |
| @@ -1100,12 +1120,6 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len) | |||
| 1100 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); | 1120 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); |
| 1101 | } | 1121 | } |
| 1102 | 1122 | ||
| 1103 | static int ext4_key_prefix(struct inode *inode, u8 **key) | ||
| 1104 | { | ||
| 1105 | *key = EXT4_SB(inode->i_sb)->key_prefix; | ||
| 1106 | return EXT4_SB(inode->i_sb)->key_prefix_size; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | static int ext4_prepare_context(struct inode *inode) | 1123 | static int ext4_prepare_context(struct inode *inode) |
| 1110 | { | 1124 | { |
| 1111 | return ext4_convert_inline_data(inode); | 1125 | return ext4_convert_inline_data(inode); |
| @@ -1179,9 +1193,9 @@ static unsigned ext4_max_namelen(struct inode *inode) | |||
| 1179 | EXT4_NAME_LEN; | 1193 | EXT4_NAME_LEN; |
| 1180 | } | 1194 | } |
| 1181 | 1195 | ||
| 1182 | static struct fscrypt_operations ext4_cryptops = { | 1196 | static const struct fscrypt_operations ext4_cryptops = { |
| 1197 | .key_prefix = "ext4:", | ||
| 1183 | .get_context = ext4_get_context, | 1198 | .get_context = ext4_get_context, |
| 1184 | .key_prefix = ext4_key_prefix, | ||
| 1185 | .prepare_context = ext4_prepare_context, | 1199 | .prepare_context = ext4_prepare_context, |
| 1186 | .set_context = ext4_set_context, | 1200 | .set_context = ext4_set_context, |
| 1187 | .dummy_context = ext4_dummy_context, | 1201 | .dummy_context = ext4_dummy_context, |
| @@ -1190,7 +1204,7 @@ static struct fscrypt_operations ext4_cryptops = { | |||
| 1190 | .max_namelen = ext4_max_namelen, | 1204 | .max_namelen = ext4_max_namelen, |
| 1191 | }; | 1205 | }; |
| 1192 | #else | 1206 | #else |
| 1193 | static struct fscrypt_operations ext4_cryptops = { | 1207 | static const struct fscrypt_operations ext4_cryptops = { |
| 1194 | .is_encrypted = ext4_encrypted_inode, | 1208 | .is_encrypted = ext4_encrypted_inode, |
| 1195 | }; | 1209 | }; |
| 1196 | #endif | 1210 | #endif |
| @@ -1290,7 +1304,7 @@ enum { | |||
| 1290 | Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, | 1304 | Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, |
| 1291 | Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, | 1305 | Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, |
| 1292 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, | 1306 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, |
| 1293 | Opt_lazytime, Opt_nolazytime, | 1307 | Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, |
| 1294 | Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, | 1308 | Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, |
| 1295 | Opt_inode_readahead_blks, Opt_journal_ioprio, | 1309 | Opt_inode_readahead_blks, Opt_journal_ioprio, |
| 1296 | Opt_dioread_nolock, Opt_dioread_lock, | 1310 | Opt_dioread_nolock, Opt_dioread_lock, |
| @@ -1358,6 +1372,7 @@ static const match_table_t tokens = { | |||
| 1358 | {Opt_delalloc, "delalloc"}, | 1372 | {Opt_delalloc, "delalloc"}, |
| 1359 | {Opt_lazytime, "lazytime"}, | 1373 | {Opt_lazytime, "lazytime"}, |
| 1360 | {Opt_nolazytime, "nolazytime"}, | 1374 | {Opt_nolazytime, "nolazytime"}, |
| 1375 | {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"}, | ||
| 1361 | {Opt_nodelalloc, "nodelalloc"}, | 1376 | {Opt_nodelalloc, "nodelalloc"}, |
| 1362 | {Opt_removed, "mblk_io_submit"}, | 1377 | {Opt_removed, "mblk_io_submit"}, |
| 1363 | {Opt_removed, "nomblk_io_submit"}, | 1378 | {Opt_removed, "nomblk_io_submit"}, |
| @@ -1563,6 +1578,7 @@ static const struct mount_opts { | |||
| 1563 | #endif | 1578 | #endif |
| 1564 | {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, | 1579 | {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, |
| 1565 | {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, | 1580 | {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, |
| 1581 | {Opt_debug_want_extra_isize, 0, MOPT_GTE0}, | ||
| 1566 | {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, | 1582 | {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, |
| 1567 | {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, | 1583 | {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, |
| 1568 | MOPT_SET | MOPT_Q}, | 1584 | MOPT_SET | MOPT_Q}, |
| @@ -1676,6 +1692,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, | |||
| 1676 | if (arg == 0) | 1692 | if (arg == 0) |
| 1677 | arg = JBD2_DEFAULT_MAX_COMMIT_AGE; | 1693 | arg = JBD2_DEFAULT_MAX_COMMIT_AGE; |
| 1678 | sbi->s_commit_interval = HZ * arg; | 1694 | sbi->s_commit_interval = HZ * arg; |
| 1695 | } else if (token == Opt_debug_want_extra_isize) { | ||
| 1696 | sbi->s_want_extra_isize = arg; | ||
| 1679 | } else if (token == Opt_max_batch_time) { | 1697 | } else if (token == Opt_max_batch_time) { |
| 1680 | sbi->s_max_batch_time = arg; | 1698 | sbi->s_max_batch_time = arg; |
| 1681 | } else if (token == Opt_min_batch_time) { | 1699 | } else if (token == Opt_min_batch_time) { |
| @@ -2619,9 +2637,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) | |||
| 2619 | 2637 | ||
| 2620 | if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) | 2638 | if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) |
| 2621 | ret = sbi->s_stripe; | 2639 | ret = sbi->s_stripe; |
| 2622 | else if (stripe_width <= sbi->s_blocks_per_group) | 2640 | else if (stripe_width && stripe_width <= sbi->s_blocks_per_group) |
| 2623 | ret = stripe_width; | 2641 | ret = stripe_width; |
| 2624 | else if (stride <= sbi->s_blocks_per_group) | 2642 | else if (stride && stride <= sbi->s_blocks_per_group) |
| 2625 | ret = stride; | 2643 | ret = stride; |
| 2626 | else | 2644 | else |
| 2627 | ret = 0; | 2645 | ret = 0; |
| @@ -3842,7 +3860,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 3842 | db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / | 3860 | db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / |
| 3843 | EXT4_DESC_PER_BLOCK(sb); | 3861 | EXT4_DESC_PER_BLOCK(sb); |
| 3844 | if (ext4_has_feature_meta_bg(sb)) { | 3862 | if (ext4_has_feature_meta_bg(sb)) { |
| 3845 | if (le32_to_cpu(es->s_first_meta_bg) >= db_count) { | 3863 | if (le32_to_cpu(es->s_first_meta_bg) > db_count) { |
| 3846 | ext4_msg(sb, KERN_WARNING, | 3864 | ext4_msg(sb, KERN_WARNING, |
| 3847 | "first meta block group too large: %u " | 3865 | "first meta block group too large: %u " |
| 3848 | "(group descriptor block count %u)", | 3866 | "(group descriptor block count %u)", |
| @@ -3925,7 +3943,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 3925 | * root first: it may be modified in the journal! | 3943 | * root first: it may be modified in the journal! |
| 3926 | */ | 3944 | */ |
| 3927 | if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { | 3945 | if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { |
| 3928 | if (ext4_load_journal(sb, es, journal_devnum)) | 3946 | err = ext4_load_journal(sb, es, journal_devnum); |
| 3947 | if (err) | ||
| 3929 | goto failed_mount3a; | 3948 | goto failed_mount3a; |
| 3930 | } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && | 3949 | } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && |
| 3931 | ext4_has_feature_journal_needs_recovery(sb)) { | 3950 | ext4_has_feature_journal_needs_recovery(sb)) { |
| @@ -4087,7 +4106,8 @@ no_journal: | |||
| 4087 | sb->s_flags |= MS_RDONLY; | 4106 | sb->s_flags |= MS_RDONLY; |
| 4088 | 4107 | ||
| 4089 | /* determine the minimum size of new large inodes, if present */ | 4108 | /* determine the minimum size of new large inodes, if present */ |
| 4090 | if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { | 4109 | if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE && |
| 4110 | sbi->s_want_extra_isize == 0) { | ||
| 4091 | sbi->s_want_extra_isize = sizeof(struct ext4_inode) - | 4111 | sbi->s_want_extra_isize = sizeof(struct ext4_inode) - |
| 4092 | EXT4_GOOD_OLD_INODE_SIZE; | 4112 | EXT4_GOOD_OLD_INODE_SIZE; |
| 4093 | if (ext4_has_feature_extra_isize(sb)) { | 4113 | if (ext4_has_feature_extra_isize(sb)) { |
| @@ -4218,11 +4238,6 @@ no_journal: | |||
| 4218 | ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); | 4238 | ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); |
| 4219 | 4239 | ||
| 4220 | kfree(orig_data); | 4240 | kfree(orig_data); |
| 4221 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
| 4222 | memcpy(sbi->key_prefix, EXT4_KEY_DESC_PREFIX, | ||
| 4223 | EXT4_KEY_DESC_PREFIX_SIZE); | ||
| 4224 | sbi->key_prefix_size = EXT4_KEY_DESC_PREFIX_SIZE; | ||
| 4225 | #endif | ||
| 4226 | return 0; | 4241 | return 0; |
| 4227 | 4242 | ||
| 4228 | cantfind_ext4: | 4243 | cantfind_ext4: |
| @@ -4720,6 +4735,9 @@ static int ext4_sync_fs(struct super_block *sb, int wait) | |||
| 4720 | bool needs_barrier = false; | 4735 | bool needs_barrier = false; |
| 4721 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4736 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
| 4722 | 4737 | ||
| 4738 | if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) | ||
| 4739 | return 0; | ||
| 4740 | |||
| 4723 | trace_ext4_sync_fs(sb, wait); | 4741 | trace_ext4_sync_fs(sb, wait); |
| 4724 | flush_workqueue(sbi->rsv_conversion_wq); | 4742 | flush_workqueue(sbi->rsv_conversion_wq); |
| 4725 | /* | 4743 | /* |
| @@ -4803,7 +4821,7 @@ out: | |||
| 4803 | */ | 4821 | */ |
| 4804 | static int ext4_unfreeze(struct super_block *sb) | 4822 | static int ext4_unfreeze(struct super_block *sb) |
| 4805 | { | 4823 | { |
| 4806 | if (sb->s_flags & MS_RDONLY) | 4824 | if ((sb->s_flags & MS_RDONLY) || ext4_forced_shutdown(EXT4_SB(sb))) |
| 4807 | return 0; | 4825 | return 0; |
| 4808 | 4826 | ||
| 4809 | if (EXT4_SB(sb)->s_journal) { | 4827 | if (EXT4_SB(sb)->s_journal) { |
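The super.c hunks above make error reporting, sync_fs and unfreeze bail out early once a forced shutdown is in progress. A minimal sketch of that guard pattern, using only helpers visible in this diff (ext4_forced_shutdown() and EXT4_SB()); example_ext4_op() is a hypothetical caller, not code from the patch:

static int example_ext4_op(struct super_block *sb)
{
        /* Fail fast: after a forced shutdown, stop touching on-disk state
         * and stop recording further errors from this path. */
        if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
                return -EIO;

        /* ... normal processing would go here ... */
        return 0;
}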
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 5a94fa52b74f..67636acf7624 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
| @@ -411,6 +411,9 @@ ext4_xattr_get(struct inode *inode, int name_index, const char *name, | |||
| 411 | { | 411 | { |
| 412 | int error; | 412 | int error; |
| 413 | 413 | ||
| 414 | if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) | ||
| 415 | return -EIO; | ||
| 416 | |||
| 414 | if (strlen(name) > 255) | 417 | if (strlen(name) > 255) |
| 415 | return -ERANGE; | 418 | return -ERANGE; |
| 416 | 419 | ||
| @@ -1188,16 +1191,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 1188 | struct ext4_xattr_block_find bs = { | 1191 | struct ext4_xattr_block_find bs = { |
| 1189 | .s = { .not_found = -ENODATA, }, | 1192 | .s = { .not_found = -ENODATA, }, |
| 1190 | }; | 1193 | }; |
| 1191 | unsigned long no_expand; | 1194 | int no_expand; |
| 1192 | int error; | 1195 | int error; |
| 1193 | 1196 | ||
| 1194 | if (!name) | 1197 | if (!name) |
| 1195 | return -EINVAL; | 1198 | return -EINVAL; |
| 1196 | if (strlen(name) > 255) | 1199 | if (strlen(name) > 255) |
| 1197 | return -ERANGE; | 1200 | return -ERANGE; |
| 1198 | down_write(&EXT4_I(inode)->xattr_sem); | 1201 | ext4_write_lock_xattr(inode, &no_expand); |
| 1199 | no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 1200 | ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 1201 | 1202 | ||
| 1202 | error = ext4_reserve_inode_write(handle, inode, &is.iloc); | 1203 | error = ext4_reserve_inode_write(handle, inode, &is.iloc); |
| 1203 | if (error) | 1204 | if (error) |
| @@ -1264,7 +1265,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 1264 | ext4_xattr_update_super_block(handle, inode->i_sb); | 1265 | ext4_xattr_update_super_block(handle, inode->i_sb); |
| 1265 | inode->i_ctime = current_time(inode); | 1266 | inode->i_ctime = current_time(inode); |
| 1266 | if (!value) | 1267 | if (!value) |
| 1267 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); | 1268 | no_expand = 0; |
| 1268 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); | 1269 | error = ext4_mark_iloc_dirty(handle, inode, &is.iloc); |
| 1269 | /* | 1270 | /* |
| 1270 | * The bh is consumed by ext4_mark_iloc_dirty, even with | 1271 | * The bh is consumed by ext4_mark_iloc_dirty, even with |
| @@ -1278,9 +1279,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | |||
| 1278 | cleanup: | 1279 | cleanup: |
| 1279 | brelse(is.iloc.bh); | 1280 | brelse(is.iloc.bh); |
| 1280 | brelse(bs.bh); | 1281 | brelse(bs.bh); |
| 1281 | if (no_expand == 0) | 1282 | ext4_write_unlock_xattr(inode, &no_expand); |
| 1282 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 1283 | up_write(&EXT4_I(inode)->xattr_sem); | ||
| 1284 | return error; | 1283 | return error; |
| 1285 | } | 1284 | } |
| 1286 | 1285 | ||
| @@ -1497,12 +1496,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize, | |||
| 1497 | int error = 0, tried_min_extra_isize = 0; | 1496 | int error = 0, tried_min_extra_isize = 0; |
| 1498 | int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize); | 1497 | int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize); |
| 1499 | int isize_diff; /* How much do we need to grow i_extra_isize */ | 1498 | int isize_diff; /* How much do we need to grow i_extra_isize */ |
| 1499 | int no_expand; | ||
| 1500 | |||
| 1501 | if (ext4_write_trylock_xattr(inode, &no_expand) == 0) | ||
| 1502 | return 0; | ||
| 1500 | 1503 | ||
| 1501 | down_write(&EXT4_I(inode)->xattr_sem); | ||
| 1502 | /* | ||
| 1503 | * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty | ||
| 1504 | */ | ||
| 1505 | ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 1506 | retry: | 1504 | retry: |
| 1507 | isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize; | 1505 | isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize; |
| 1508 | if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) | 1506 | if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) |
| @@ -1584,17 +1582,16 @@ shift: | |||
| 1584 | EXT4_I(inode)->i_extra_isize = new_extra_isize; | 1582 | EXT4_I(inode)->i_extra_isize = new_extra_isize; |
| 1585 | brelse(bh); | 1583 | brelse(bh); |
| 1586 | out: | 1584 | out: |
| 1587 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); | 1585 | ext4_write_unlock_xattr(inode, &no_expand); |
| 1588 | up_write(&EXT4_I(inode)->xattr_sem); | ||
| 1589 | return 0; | 1586 | return 0; |
| 1590 | 1587 | ||
| 1591 | cleanup: | 1588 | cleanup: |
| 1592 | brelse(bh); | 1589 | brelse(bh); |
| 1593 | /* | 1590 | /* |
| 1594 | * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode | 1591 | * Inode size expansion failed; don't try again |
| 1595 | * size expansion failed. | ||
| 1596 | */ | 1592 | */ |
| 1597 | up_write(&EXT4_I(inode)->xattr_sem); | 1593 | no_expand = 1; |
| 1594 | ext4_write_unlock_xattr(inode, &no_expand); | ||
| 1598 | return error; | 1595 | return error; |
| 1599 | } | 1596 | } |
| 1600 | 1597 | ||
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index a92e783fa057..099c8b670ef5 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h | |||
| @@ -102,6 +102,38 @@ extern const struct xattr_handler ext4_xattr_security_handler; | |||
| 102 | 102 | ||
| 103 | #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c" | 103 | #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c" |
| 104 | 104 | ||
| 105 | /* | ||
| 106 | * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes. | ||
| 107 | * The first is to signal that the inline xattrs and data are | ||
| 108 | * taking up so much space that we might as well not keep trying to | ||
| 109 | * expand it. The second is that xattr_sem is taken for writing, so | ||
| 110 | * we shouldn't try to recurse into the inode expansion. For this | ||
| 111 | * second case, we need to make sure that we save and restore the | ||
| 112 | * NO_EXPAND state flag appropriately. | ||
| 113 | */ | ||
| 114 | static inline void ext4_write_lock_xattr(struct inode *inode, int *save) | ||
| 115 | { | ||
| 116 | down_write(&EXT4_I(inode)->xattr_sem); | ||
| 117 | *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 118 | ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline int ext4_write_trylock_xattr(struct inode *inode, int *save) | ||
| 122 | { | ||
| 123 | if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0) | ||
| 124 | return 0; | ||
| 125 | *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 126 | ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 127 | return 1; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline void ext4_write_unlock_xattr(struct inode *inode, int *save) | ||
| 131 | { | ||
| 132 | if (*save == 0) | ||
| 133 | ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND); | ||
| 134 | up_write(&EXT4_I(inode)->xattr_sem); | ||
| 135 | } | ||
| 136 | |||
| 105 | extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); | 137 | extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); |
| 106 | 138 | ||
| 107 | extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t); | 139 | extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t); |
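The three inline helpers added above centralise the save/set/restore handling of EXT4_STATE_NO_EXPAND that the xattr.c hunks used to open-code around xattr_sem. A short usage sketch relying only on those helpers; example_update_xattr() and the work done under the lock are hypothetical:

static int example_update_xattr(struct inode *inode)
{
        int no_expand;
        int error;

        /* Saves the current NO_EXPAND state and then sets it, so marking
         * the inode dirty below cannot recurse into inode expansion. */
        ext4_write_lock_xattr(inode, &no_expand);

        error = 0;      /* ... modify in-inode or external xattrs here ... */
        if (error)
                no_expand = 1;  /* leave NO_EXPAND set: expansion failed */

        /* Clears NO_EXPAND again unless it was already set before the lock
         * was taken or no_expand was forced to 1 above. */
        ext4_write_unlock_xattr(inode, &no_expand);
        return error;
}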
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 827c5daef4fc..18607fc5240d 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c | |||
| @@ -268,7 +268,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, | |||
| 268 | 268 | ||
| 269 | err = fscrypt_setup_filename(dir, child, 1, &fname); | 269 | err = fscrypt_setup_filename(dir, child, 1, &fname); |
| 270 | if (err) { | 270 | if (err) { |
| 271 | *res_page = ERR_PTR(err); | 271 | if (err == -ENOENT) |
| 272 | *res_page = NULL; | ||
| 273 | else | ||
| 274 | *res_page = ERR_PTR(err); | ||
| 272 | return NULL; | 275 | return NULL; |
| 273 | } | 276 | } |
| 274 | 277 | ||
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 2da8c3aa0ce5..069fc7277d8d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
| @@ -22,7 +22,11 @@ | |||
| 22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
| 23 | #include <linux/bio.h> | 23 | #include <linux/bio.h> |
| 24 | #include <linux/blkdev.h> | 24 | #include <linux/blkdev.h> |
| 25 | #include <linux/fscrypto.h> | 25 | #ifdef CONFIG_F2FS_FS_ENCRYPTION |
| 26 | #include <linux/fscrypt_supp.h> | ||
| 27 | #else | ||
| 28 | #include <linux/fscrypt_notsupp.h> | ||
| 29 | #endif | ||
| 26 | #include <crypto/hash.h> | 30 | #include <crypto/hash.h> |
| 27 | 31 | ||
| 28 | #ifdef CONFIG_F2FS_CHECK_FS | 32 | #ifdef CONFIG_F2FS_CHECK_FS |
| @@ -760,10 +764,6 @@ enum { | |||
| 760 | MAX_TIME, | 764 | MAX_TIME, |
| 761 | }; | 765 | }; |
| 762 | 766 | ||
| 763 | #ifdef CONFIG_F2FS_FS_ENCRYPTION | ||
| 764 | #define F2FS_KEY_DESC_PREFIX "f2fs:" | ||
| 765 | #define F2FS_KEY_DESC_PREFIX_SIZE 5 | ||
| 766 | #endif | ||
| 767 | struct f2fs_sb_info { | 767 | struct f2fs_sb_info { |
| 768 | struct super_block *sb; /* pointer to VFS super block */ | 768 | struct super_block *sb; /* pointer to VFS super block */ |
| 769 | struct proc_dir_entry *s_proc; /* proc entry */ | 769 | struct proc_dir_entry *s_proc; /* proc entry */ |
| @@ -771,11 +771,6 @@ struct f2fs_sb_info { | |||
| 771 | int valid_super_block; /* valid super block no */ | 771 | int valid_super_block; /* valid super block no */ |
| 772 | unsigned long s_flag; /* flags for sbi */ | 772 | unsigned long s_flag; /* flags for sbi */ |
| 773 | 773 | ||
| 774 | #ifdef CONFIG_F2FS_FS_ENCRYPTION | ||
| 775 | u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE]; | ||
| 776 | u8 key_prefix_size; | ||
| 777 | #endif | ||
| 778 | |||
| 779 | #ifdef CONFIG_BLK_DEV_ZONED | 774 | #ifdef CONFIG_BLK_DEV_ZONED |
| 780 | unsigned int blocks_per_blkz; /* F2FS blocks per zone */ | 775 | unsigned int blocks_per_blkz; /* F2FS blocks per zone */ |
| 781 | unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */ | 776 | unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */ |
| @@ -2510,28 +2505,4 @@ static inline bool f2fs_may_encrypt(struct inode *inode) | |||
| 2510 | #endif | 2505 | #endif |
| 2511 | } | 2506 | } |
| 2512 | 2507 | ||
| 2513 | #ifndef CONFIG_F2FS_FS_ENCRYPTION | ||
| 2514 | #define fscrypt_set_d_op(i) | ||
| 2515 | #define fscrypt_get_ctx fscrypt_notsupp_get_ctx | ||
| 2516 | #define fscrypt_release_ctx fscrypt_notsupp_release_ctx | ||
| 2517 | #define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page | ||
| 2518 | #define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page | ||
| 2519 | #define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages | ||
| 2520 | #define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page | ||
| 2521 | #define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page | ||
| 2522 | #define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range | ||
| 2523 | #define fscrypt_ioctl_set_policy fscrypt_notsupp_ioctl_set_policy | ||
| 2524 | #define fscrypt_ioctl_get_policy fscrypt_notsupp_ioctl_get_policy | ||
| 2525 | #define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context | ||
| 2526 | #define fscrypt_inherit_context fscrypt_notsupp_inherit_context | ||
| 2527 | #define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info | ||
| 2528 | #define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info | ||
| 2529 | #define fscrypt_setup_filename fscrypt_notsupp_setup_filename | ||
| 2530 | #define fscrypt_free_filename fscrypt_notsupp_free_filename | ||
| 2531 | #define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size | ||
| 2532 | #define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer | ||
| 2533 | #define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer | ||
| 2534 | #define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr | ||
| 2535 | #define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk | ||
| 2536 | #endif | ||
| 2537 | #endif | 2508 | #endif |
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 56c19b0610a8..11cabcadb1a3 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c | |||
| @@ -403,7 +403,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, | |||
| 403 | return err; | 403 | return err; |
| 404 | 404 | ||
| 405 | if (!fscrypt_has_encryption_key(dir)) | 405 | if (!fscrypt_has_encryption_key(dir)) |
| 406 | return -EPERM; | 406 | return -ENOKEY; |
| 407 | 407 | ||
| 408 | disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + | 408 | disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + |
| 409 | sizeof(struct fscrypt_symlink_data)); | 409 | sizeof(struct fscrypt_symlink_data)); |
| @@ -447,7 +447,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, | |||
| 447 | goto err_out; | 447 | goto err_out; |
| 448 | 448 | ||
| 449 | if (!fscrypt_has_encryption_key(inode)) { | 449 | if (!fscrypt_has_encryption_key(inode)) { |
| 450 | err = -EPERM; | 450 | err = -ENOKEY; |
| 451 | goto err_out; | 451 | goto err_out; |
| 452 | } | 452 | } |
| 453 | 453 | ||
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 0738f48293cc..0d8802453758 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
| @@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, | |||
| 713 | } | 713 | } |
| 714 | sector = SECTOR_FROM_BLOCK(blkstart); | 714 | sector = SECTOR_FROM_BLOCK(blkstart); |
| 715 | 715 | ||
| 716 | if (sector & (bdev_zone_size(bdev) - 1) || | 716 | if (sector & (bdev_zone_sectors(bdev) - 1) || |
| 717 | nr_sects != bdev_zone_size(bdev)) { | 717 | nr_sects != bdev_zone_sectors(bdev)) { |
| 718 | f2fs_msg(sbi->sb, KERN_INFO, | 718 | f2fs_msg(sbi->sb, KERN_INFO, |
| 719 | "(%d) %s: Unaligned discard attempted (block %x + %x)", | 719 | "(%d) %s: Unaligned discard attempted (block %x + %x)", |
| 720 | devi, sbi->s_ndevs ? FDEV(devi).path: "", | 720 | devi, sbi->s_ndevs ? FDEV(devi).path: "", |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 702638e21c76..a831303bb777 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
| @@ -1156,12 +1156,6 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len) | |||
| 1156 | ctx, len, NULL); | 1156 | ctx, len, NULL); |
| 1157 | } | 1157 | } |
| 1158 | 1158 | ||
| 1159 | static int f2fs_key_prefix(struct inode *inode, u8 **key) | ||
| 1160 | { | ||
| 1161 | *key = F2FS_I_SB(inode)->key_prefix; | ||
| 1162 | return F2FS_I_SB(inode)->key_prefix_size; | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, | 1159 | static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, |
| 1166 | void *fs_data) | 1160 | void *fs_data) |
| 1167 | { | 1161 | { |
| @@ -1176,16 +1170,16 @@ static unsigned f2fs_max_namelen(struct inode *inode) | |||
| 1176 | inode->i_sb->s_blocksize : F2FS_NAME_LEN; | 1170 | inode->i_sb->s_blocksize : F2FS_NAME_LEN; |
| 1177 | } | 1171 | } |
| 1178 | 1172 | ||
| 1179 | static struct fscrypt_operations f2fs_cryptops = { | 1173 | static const struct fscrypt_operations f2fs_cryptops = { |
| 1174 | .key_prefix = "f2fs:", | ||
| 1180 | .get_context = f2fs_get_context, | 1175 | .get_context = f2fs_get_context, |
| 1181 | .key_prefix = f2fs_key_prefix, | ||
| 1182 | .set_context = f2fs_set_context, | 1176 | .set_context = f2fs_set_context, |
| 1183 | .is_encrypted = f2fs_encrypted_inode, | 1177 | .is_encrypted = f2fs_encrypted_inode, |
| 1184 | .empty_dir = f2fs_empty_dir, | 1178 | .empty_dir = f2fs_empty_dir, |
| 1185 | .max_namelen = f2fs_max_namelen, | 1179 | .max_namelen = f2fs_max_namelen, |
| 1186 | }; | 1180 | }; |
| 1187 | #else | 1181 | #else |
| 1188 | static struct fscrypt_operations f2fs_cryptops = { | 1182 | static const struct fscrypt_operations f2fs_cryptops = { |
| 1189 | .is_encrypted = f2fs_encrypted_inode, | 1183 | .is_encrypted = f2fs_encrypted_inode, |
| 1190 | }; | 1184 | }; |
| 1191 | #endif | 1185 | #endif |
| @@ -1518,12 +1512,6 @@ static void init_sb_info(struct f2fs_sb_info *sbi) | |||
| 1518 | mutex_init(&sbi->wio_mutex[NODE]); | 1512 | mutex_init(&sbi->wio_mutex[NODE]); |
| 1519 | mutex_init(&sbi->wio_mutex[DATA]); | 1513 | mutex_init(&sbi->wio_mutex[DATA]); |
| 1520 | spin_lock_init(&sbi->cp_lock); | 1514 | spin_lock_init(&sbi->cp_lock); |
| 1521 | |||
| 1522 | #ifdef CONFIG_F2FS_FS_ENCRYPTION | ||
| 1523 | memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX, | ||
| 1524 | F2FS_KEY_DESC_PREFIX_SIZE); | ||
| 1525 | sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE; | ||
| 1526 | #endif | ||
| 1527 | } | 1515 | } |
| 1528 | 1516 | ||
| 1529 | static int init_percpu_info(struct f2fs_sb_info *sbi) | 1517 | static int init_percpu_info(struct f2fs_sb_info *sbi) |
| @@ -1553,16 +1541,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) | |||
| 1553 | return 0; | 1541 | return 0; |
| 1554 | 1542 | ||
| 1555 | if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != | 1543 | if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != |
| 1556 | SECTOR_TO_BLOCK(bdev_zone_size(bdev))) | 1544 | SECTOR_TO_BLOCK(bdev_zone_sectors(bdev))) |
| 1557 | return -EINVAL; | 1545 | return -EINVAL; |
| 1558 | sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev)); | 1546 | sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)); |
| 1559 | if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz != | 1547 | if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz != |
| 1560 | __ilog2_u32(sbi->blocks_per_blkz)) | 1548 | __ilog2_u32(sbi->blocks_per_blkz)) |
| 1561 | return -EINVAL; | 1549 | return -EINVAL; |
| 1562 | sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz); | 1550 | sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz); |
| 1563 | FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >> | 1551 | FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >> |
| 1564 | sbi->log_blocks_per_blkz; | 1552 | sbi->log_blocks_per_blkz; |
| 1565 | if (nr_sectors & (bdev_zone_size(bdev) - 1)) | 1553 | if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) |
| 1566 | FDEV(devi).nr_blkz++; | 1554 | FDEV(devi).nr_blkz++; |
| 1567 | 1555 | ||
| 1568 | FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL); | 1556 | FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL); |
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 4304072161aa..40d61077bead 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c | |||
| @@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) | |||
| 542 | hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { | 542 | hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { |
| 543 | if (invalidate) | 543 | if (invalidate) |
| 544 | set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); | 544 | set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); |
| 545 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
| 545 | fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); | 546 | fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); |
| 546 | } | 547 | } |
| 547 | } else { | 548 | } else { |
| @@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) | |||
| 560 | wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, | 561 | wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, |
| 561 | TASK_UNINTERRUPTIBLE); | 562 | TASK_UNINTERRUPTIBLE); |
| 562 | 563 | ||
| 564 | /* Make sure any pending writes are cancelled. */ | ||
| 565 | if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) | ||
| 566 | fscache_invalidate_writes(cookie); | ||
| 567 | |||
| 563 | /* Reset the cookie state if it wasn't relinquished */ | 568 | /* Reset the cookie state if it wasn't relinquished */ |
| 564 | if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { | 569 | if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { |
| 565 | atomic_inc(&cookie->n_active); | 570 | atomic_inc(&cookie->n_active); |
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c index 9b28649df3a1..a8aa00be4444 100644 --- a/fs/fscache/netfs.c +++ b/fs/fscache/netfs.c | |||
| @@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) | |||
| 48 | cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; | 48 | cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; |
| 49 | 49 | ||
| 50 | spin_lock_init(&cookie->lock); | 50 | spin_lock_init(&cookie->lock); |
| 51 | spin_lock_init(&cookie->stores_lock); | ||
| 51 | INIT_HLIST_HEAD(&cookie->backing_objects); | 52 | INIT_HLIST_HEAD(&cookie->backing_objects); |
| 52 | 53 | ||
| 53 | /* check the netfs type is not already present */ | 54 | /* check the netfs type is not already present */ |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 9e792e30f4db..7a182c87f378 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
| @@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object | |||
| 30 | static const struct fscache_state *fscache_object_available(struct fscache_object *, int); | 30 | static const struct fscache_state *fscache_object_available(struct fscache_object *, int); |
| 31 | static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); | 31 | static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); |
| 32 | static const struct fscache_state *fscache_update_object(struct fscache_object *, int); | 32 | static const struct fscache_state *fscache_update_object(struct fscache_object *, int); |
| 33 | static const struct fscache_state *fscache_object_dead(struct fscache_object *, int); | ||
| 33 | 34 | ||
| 34 | #define __STATE_NAME(n) fscache_osm_##n | 35 | #define __STATE_NAME(n) fscache_osm_##n |
| 35 | #define STATE(n) (&__STATE_NAME(n)) | 36 | #define STATE(n) (&__STATE_NAME(n)) |
| @@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); | |||
| 91 | static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); | 92 | static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); |
| 92 | static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); | 93 | static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); |
| 93 | static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); | 94 | static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); |
| 94 | static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); | 95 | static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead); |
| 95 | 96 | ||
| 96 | static WAIT_STATE(WAIT_FOR_INIT, "?INI", | 97 | static WAIT_STATE(WAIT_FOR_INIT, "?INI", |
| 97 | TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); | 98 | TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); |
| @@ -229,6 +230,10 @@ execute_work_state: | |||
| 229 | event = -1; | 230 | event = -1; |
| 230 | if (new_state == NO_TRANSIT) { | 231 | if (new_state == NO_TRANSIT) { |
| 231 | _debug("{OBJ%x} %s notrans", object->debug_id, state->name); | 232 | _debug("{OBJ%x} %s notrans", object->debug_id, state->name); |
| 233 | if (unlikely(state == STATE(OBJECT_DEAD))) { | ||
| 234 | _leave(" [dead]"); | ||
| 235 | return; | ||
| 236 | } | ||
| 232 | fscache_enqueue_object(object); | 237 | fscache_enqueue_object(object); |
| 233 | event_mask = object->oob_event_mask; | 238 | event_mask = object->oob_event_mask; |
| 234 | goto unmask_events; | 239 | goto unmask_events; |
| @@ -239,7 +244,7 @@ execute_work_state: | |||
| 239 | object->state = state = new_state; | 244 | object->state = state = new_state; |
| 240 | 245 | ||
| 241 | if (state->work) { | 246 | if (state->work) { |
| 242 | if (unlikely(state->work == ((void *)2UL))) { | 247 | if (unlikely(state == STATE(OBJECT_DEAD))) { |
| 243 | _leave(" [dead]"); | 248 | _leave(" [dead]"); |
| 244 | return; | 249 | return; |
| 245 | } | 250 | } |
| @@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob | |||
| 645 | fscache_mark_object_dead(object); | 650 | fscache_mark_object_dead(object); |
| 646 | object->oob_event_mask = 0; | 651 | object->oob_event_mask = 0; |
| 647 | 652 | ||
| 653 | if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) { | ||
| 654 | /* Reject any new read/write ops and abort any that are pending. */ | ||
| 655 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | ||
| 656 | fscache_cancel_all_ops(object); | ||
| 657 | } | ||
| 658 | |||
| 648 | if (list_empty(&object->dependents) && | 659 | if (list_empty(&object->dependents) && |
| 649 | object->n_ops == 0 && | 660 | object->n_ops == 0 && |
| 650 | object->n_children == 0) | 661 | object->n_children == 0) |
| @@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object, | |||
| 1077 | } | 1088 | } |
| 1078 | } | 1089 | } |
| 1079 | EXPORT_SYMBOL(fscache_object_mark_killed); | 1090 | EXPORT_SYMBOL(fscache_object_mark_killed); |
| 1091 | |||
| 1092 | /* | ||
| 1093 | * The object is dead. We can get here if an object gets queued by an event | ||
| 1094 | * that would lead to its death (such as EV_KILL) when the dispatcher is | ||
| 1095 | * already running (and so can be requeued) but hasn't yet cleared the event | ||
| 1096 | * mask. | ||
| 1097 | */ | ||
| 1098 | static const struct fscache_state *fscache_object_dead(struct fscache_object *object, | ||
| 1099 | int event) | ||
| 1100 | { | ||
| 1101 | if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD, | ||
| 1102 | &object->flags)) | ||
| 1103 | return NO_TRANSIT; | ||
| 1104 | |||
| 1105 | WARN(true, "FS-Cache object redispatched after death"); | ||
| 1106 | return NO_TRANSIT; | ||
| 1107 | } | ||
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 70ea57c7b6bb..f11792672977 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
| @@ -399,6 +399,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) | |||
| 399 | static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) | 399 | static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) |
| 400 | { | 400 | { |
| 401 | spin_lock(&fiq->waitq.lock); | 401 | spin_lock(&fiq->waitq.lock); |
| 402 | if (test_bit(FR_FINISHED, &req->flags)) { | ||
| 403 | spin_unlock(&fiq->waitq.lock); | ||
| 404 | return; | ||
| 405 | } | ||
| 402 | if (list_empty(&req->intr_entry)) { | 406 | if (list_empty(&req->intr_entry)) { |
| 403 | list_add_tail(&req->intr_entry, &fiq->interrupts); | 407 | list_add_tail(&req->intr_entry, &fiq->interrupts); |
| 404 | wake_up_locked(&fiq->waitq); | 408 | wake_up_locked(&fiq->waitq); |
| @@ -1372,6 +1376,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, | |||
| 1372 | * code can Oops if the buffer persists after module unload. | 1376 | * code can Oops if the buffer persists after module unload. |
| 1373 | */ | 1377 | */ |
| 1374 | bufs[page_nr].ops = &nosteal_pipe_buf_ops; | 1378 | bufs[page_nr].ops = &nosteal_pipe_buf_ops; |
| 1379 | bufs[page_nr].flags = 0; | ||
| 1375 | ret = add_to_pipe(pipe, &bufs[page_nr++]); | 1380 | ret = add_to_pipe(pipe, &bufs[page_nr++]); |
| 1376 | if (unlikely(ret < 0)) | 1381 | if (unlikely(ret < 0)) |
| 1377 | break; | 1382 | break; |
| @@ -2025,7 +2030,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head) | |||
| 2025 | struct fuse_req *req; | 2030 | struct fuse_req *req; |
| 2026 | req = list_entry(head->next, struct fuse_req, list); | 2031 | req = list_entry(head->next, struct fuse_req, list); |
| 2027 | req->out.h.error = -ECONNABORTED; | 2032 | req->out.h.error = -ECONNABORTED; |
| 2028 | clear_bit(FR_PENDING, &req->flags); | ||
| 2029 | clear_bit(FR_SENT, &req->flags); | 2033 | clear_bit(FR_SENT, &req->flags); |
| 2030 | list_del_init(&req->list); | 2034 | list_del_init(&req->list); |
| 2031 | request_end(fc, req); | 2035 | request_end(fc, req); |
| @@ -2103,6 +2107,8 @@ void fuse_abort_conn(struct fuse_conn *fc) | |||
| 2103 | spin_lock(&fiq->waitq.lock); | 2107 | spin_lock(&fiq->waitq.lock); |
| 2104 | fiq->connected = 0; | 2108 | fiq->connected = 0; |
| 2105 | list_splice_init(&fiq->pending, &to_end2); | 2109 | list_splice_init(&fiq->pending, &to_end2); |
| 2110 | list_for_each_entry(req, &to_end2, list) | ||
| 2111 | clear_bit(FR_PENDING, &req->flags); | ||
| 2106 | while (forget_pending(fiq)) | 2112 | while (forget_pending(fiq)) |
| 2107 | kfree(dequeue_forget(fiq, 1, NULL)); | 2113 | kfree(dequeue_forget(fiq, 1, NULL)); |
| 2108 | wake_up_all_locked(&fiq->waitq); | 2114 | wake_up_all_locked(&fiq->waitq); |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 1f7c732f32b0..811fd8929a18 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
| @@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec) | |||
| 68 | if (sec || nsec) { | 68 | if (sec || nsec) { |
| 69 | struct timespec64 ts = { | 69 | struct timespec64 ts = { |
| 70 | sec, | 70 | sec, |
| 71 | max_t(u32, nsec, NSEC_PER_SEC - 1) | 71 | min_t(u32, nsec, NSEC_PER_SEC - 1) |
| 72 | }; | 72 | }; |
| 73 | 73 | ||
| 74 | return get_jiffies_64() + timespec64_to_jiffies(&ts); | 74 | return get_jiffies_64() + timespec64_to_jiffies(&ts); |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 91307940c8ac..052f8d3c41cb 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
| @@ -256,7 +256,7 @@ struct fuse_io_priv { | |||
| 256 | 256 | ||
| 257 | #define FUSE_IO_PRIV_SYNC(f) \ | 257 | #define FUSE_IO_PRIV_SYNC(f) \ |
| 258 | { \ | 258 | { \ |
| 259 | .refcnt = { ATOMIC_INIT(1) }, \ | 259 | .refcnt = KREF_INIT(1), \ |
| 260 | .async = 0, \ | 260 | .async = 0, \ |
| 261 | .file = f, \ | 261 | .file = f, \ |
| 262 | } | 262 | } |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 6b039d7ce160..ed7a2e252ad8 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
| @@ -143,8 +143,8 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc) | |||
| 143 | /* This is the same as calling block_write_full_page, but it also | 143 | /* This is the same as calling block_write_full_page, but it also |
| 144 | * writes pages outside of i_size | 144 | * writes pages outside of i_size |
| 145 | */ | 145 | */ |
| 146 | int gfs2_write_full_page(struct page *page, get_block_t *get_block, | 146 | static int gfs2_write_full_page(struct page *page, get_block_t *get_block, |
| 147 | struct writeback_control *wbc) | 147 | struct writeback_control *wbc) |
| 148 | { | 148 | { |
| 149 | struct inode * const inode = page->mapping->host; | 149 | struct inode * const inode = page->mapping->host; |
| 150 | loff_t i_size = i_size_read(inode); | 150 | loff_t i_size = i_size_read(inode); |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index fc5da4cbe88c..01b97c012c6e 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
| @@ -720,6 +720,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 720 | { | 720 | { |
| 721 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 721 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
| 722 | struct gfs2_rgrp_list rlist; | 722 | struct gfs2_rgrp_list rlist; |
| 723 | struct gfs2_trans *tr; | ||
| 723 | u64 bn, bstart; | 724 | u64 bn, bstart; |
| 724 | u32 blen, btotal; | 725 | u32 blen, btotal; |
| 725 | __be64 *p; | 726 | __be64 *p; |
| @@ -728,6 +729,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 728 | unsigned int revokes = 0; | 729 | unsigned int revokes = 0; |
| 729 | int x; | 730 | int x; |
| 730 | int error; | 731 | int error; |
| 732 | int jblocks_rqsted; | ||
| 731 | 733 | ||
| 732 | error = gfs2_rindex_update(sdp); | 734 | error = gfs2_rindex_update(sdp); |
| 733 | if (error) | 735 | if (error) |
| @@ -791,12 +793,17 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 791 | if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */ | 793 | if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */ |
| 792 | gfs2_rs_deltree(&ip->i_res); | 794 | gfs2_rs_deltree(&ip->i_res); |
| 793 | 795 | ||
| 794 | error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + | 796 | restart: |
| 795 | RES_INDIRECT + RES_STATFS + RES_QUOTA, | 797 | jblocks_rqsted = rg_blocks + RES_DINODE + |
| 796 | revokes); | 798 | RES_INDIRECT + RES_STATFS + RES_QUOTA + |
| 799 | gfs2_struct2blk(sdp, revokes, sizeof(u64)); | ||
| 800 | if (jblocks_rqsted > atomic_read(&sdp->sd_log_thresh2)) | ||
| 801 | jblocks_rqsted = atomic_read(&sdp->sd_log_thresh2); | ||
| 802 | error = gfs2_trans_begin(sdp, jblocks_rqsted, revokes); | ||
| 797 | if (error) | 803 | if (error) |
| 798 | goto out_rg_gunlock; | 804 | goto out_rg_gunlock; |
| 799 | 805 | ||
| 806 | tr = current->journal_info; | ||
| 800 | down_write(&ip->i_rw_mutex); | 807 | down_write(&ip->i_rw_mutex); |
| 801 | 808 | ||
| 802 | gfs2_trans_add_meta(ip->i_gl, dibh); | 809 | gfs2_trans_add_meta(ip->i_gl, dibh); |
| @@ -810,6 +817,16 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 810 | if (!*p) | 817 | if (!*p) |
| 811 | continue; | 818 | continue; |
| 812 | 819 | ||
| 820 | /* check for max reasonable journal transaction blocks */ | ||
| 821 | if (tr->tr_num_buf_new + RES_STATFS + | ||
| 822 | RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) { | ||
| 823 | if (rg_blocks >= tr->tr_num_buf_new) | ||
| 824 | rg_blocks -= tr->tr_num_buf_new; | ||
| 825 | else | ||
| 826 | rg_blocks = 0; | ||
| 827 | break; | ||
| 828 | } | ||
| 829 | |||
| 813 | bn = be64_to_cpu(*p); | 830 | bn = be64_to_cpu(*p); |
| 814 | 831 | ||
| 815 | if (bstart + blen == bn) | 832 | if (bstart + blen == bn) |
| @@ -827,6 +844,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 827 | *p = 0; | 844 | *p = 0; |
| 828 | gfs2_add_inode_blocks(&ip->i_inode, -1); | 845 | gfs2_add_inode_blocks(&ip->i_inode, -1); |
| 829 | } | 846 | } |
| 847 | if (p == bottom) | ||
| 848 | rg_blocks = 0; | ||
| 849 | |||
| 830 | if (bstart) { | 850 | if (bstart) { |
| 831 | __gfs2_free_blocks(ip, bstart, blen, metadata); | 851 | __gfs2_free_blocks(ip, bstart, blen, metadata); |
| 832 | btotal += blen; | 852 | btotal += blen; |
| @@ -844,6 +864,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | |||
| 844 | 864 | ||
| 845 | gfs2_trans_end(sdp); | 865 | gfs2_trans_end(sdp); |
| 846 | 866 | ||
| 867 | if (rg_blocks) | ||
| 868 | goto restart; | ||
| 869 | |||
| 847 | out_rg_gunlock: | 870 | out_rg_gunlock: |
| 848 | gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); | 871 | gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); |
| 849 | out_rlist: | 872 | out_rlist: |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 94f50cac91c6..20a13716a672 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
| @@ -1420,26 +1420,32 @@ static struct shrinker glock_shrinker = { | |||
| 1420 | * @sdp: the filesystem | 1420 | * @sdp: the filesystem |
| 1421 | * @bucket: the bucket | 1421 | * @bucket: the bucket |
| 1422 | * | 1422 | * |
| 1423 | * Note that the examiner function may be called multiple times on the | ||
| 1424 | * same object, so the caller must ensure that the examiner can cope | ||
| 1425 | * with that. | ||
| 1423 | */ | 1426 | */ |
| 1424 | 1427 | ||
| 1425 | static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) | 1428 | static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) |
| 1426 | { | 1429 | { |
| 1427 | struct gfs2_glock *gl; | 1430 | struct gfs2_glock *gl; |
| 1428 | struct rhash_head *pos; | 1431 | struct rhashtable_iter iter; |
| 1429 | const struct bucket_table *tbl; | ||
| 1430 | int i; | ||
| 1431 | 1432 | ||
| 1432 | rcu_read_lock(); | 1433 | rhashtable_walk_enter(&gl_hash_table, &iter); |
| 1433 | tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table); | 1434 | |
| 1434 | for (i = 0; i < tbl->size; i++) { | 1435 | do { |
| 1435 | rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) { | 1436 | gl = ERR_PTR(rhashtable_walk_start(&iter)); |
| 1437 | if (gl) | ||
| 1438 | continue; | ||
| 1439 | |||
| 1440 | while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) | ||
| 1436 | if ((gl->gl_name.ln_sbd == sdp) && | 1441 | if ((gl->gl_name.ln_sbd == sdp) && |
| 1437 | lockref_get_not_dead(&gl->gl_lockref)) | 1442 | lockref_get_not_dead(&gl->gl_lockref)) |
| 1438 | examiner(gl); | 1443 | examiner(gl); |
| 1439 | } | 1444 | |
| 1440 | } | 1445 | rhashtable_walk_stop(&iter); |
| 1441 | rcu_read_unlock(); | 1446 | } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); |
| 1442 | cond_resched(); | 1447 | |
| 1448 | rhashtable_walk_exit(&iter); | ||
| 1443 | } | 1449 | } |
| 1444 | 1450 | ||
| 1445 | /** | 1451 | /** |
| @@ -1802,16 +1808,18 @@ void gfs2_glock_exit(void) | |||
| 1802 | 1808 | ||
| 1803 | static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi) | 1809 | static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi) |
| 1804 | { | 1810 | { |
| 1805 | do { | 1811 | while ((gi->gl = rhashtable_walk_next(&gi->hti))) { |
| 1806 | gi->gl = rhashtable_walk_next(&gi->hti); | ||
| 1807 | if (IS_ERR(gi->gl)) { | 1812 | if (IS_ERR(gi->gl)) { |
| 1808 | if (PTR_ERR(gi->gl) == -EAGAIN) | 1813 | if (PTR_ERR(gi->gl) == -EAGAIN) |
| 1809 | continue; | 1814 | continue; |
| 1810 | gi->gl = NULL; | 1815 | gi->gl = NULL; |
| 1816 | return; | ||
| 1811 | } | 1817 | } |
| 1812 | /* Skip entries for other sb and dead entries */ | 1818 | /* Skip entries for other sb and dead entries */ |
| 1813 | } while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) || | 1819 | if (gi->sdp == gi->gl->gl_name.ln_sbd && |
| 1814 | __lockref_is_dead(&gi->gl->gl_lockref))); | 1820 | !__lockref_is_dead(&gi->gl->gl_lockref)) |
| 1821 | return; | ||
| 1822 | } | ||
| 1815 | } | 1823 | } |
| 1816 | 1824 | ||
| 1817 | static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) | 1825 | static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) |
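glock_hash_walk() above moves from open-coded bucket iteration to the rhashtable walk interface and retries the whole pass when the walk returns -EAGAIN (for example after a concurrent resize), which is why the examiner must tolerate repeat visits. The same shape in isolation, assuming only the rhashtable calls used above; ht and visit() are placeholders, not gfs2 symbols:

static void example_walk(struct rhashtable *ht, void (*visit)(void *obj))
{
        struct rhashtable_iter iter;
        void *obj;

        rhashtable_walk_enter(ht, &iter);
        do {
                obj = ERR_PTR(rhashtable_walk_start(&iter));
                if (obj)
                        continue;       /* start failed, typically -EAGAIN: retry */

                while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
                        visit(obj);     /* entries may be seen more than once */

                rhashtable_walk_stop(&iter);
        } while (cond_resched(), obj == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
}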
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index a6a3389a07fc..c45084ac642d 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
| @@ -470,15 +470,19 @@ struct gfs2_quota_data { | |||
| 470 | struct rcu_head qd_rcu; | 470 | struct rcu_head qd_rcu; |
| 471 | }; | 471 | }; |
| 472 | 472 | ||
| 473 | enum { | ||
| 474 | TR_TOUCHED = 1, | ||
| 475 | TR_ATTACHED = 2, | ||
| 476 | TR_ALLOCED = 3, | ||
| 477 | }; | ||
| 478 | |||
| 473 | struct gfs2_trans { | 479 | struct gfs2_trans { |
| 474 | unsigned long tr_ip; | 480 | unsigned long tr_ip; |
| 475 | 481 | ||
| 476 | unsigned int tr_blocks; | 482 | unsigned int tr_blocks; |
| 477 | unsigned int tr_revokes; | 483 | unsigned int tr_revokes; |
| 478 | unsigned int tr_reserved; | 484 | unsigned int tr_reserved; |
| 479 | unsigned int tr_touched:1; | 485 | unsigned long tr_flags; |
| 480 | unsigned int tr_attached:1; | ||
| 481 | unsigned int tr_alloced:1; | ||
| 482 | 486 | ||
| 483 | unsigned int tr_num_buf_new; | 487 | unsigned int tr_num_buf_new; |
| 484 | unsigned int tr_num_databuf_new; | 488 | unsigned int tr_num_databuf_new; |
| @@ -794,6 +798,7 @@ struct gfs2_sbd { | |||
| 794 | atomic_t sd_log_thresh1; | 798 | atomic_t sd_log_thresh1; |
| 795 | atomic_t sd_log_thresh2; | 799 | atomic_t sd_log_thresh2; |
| 796 | atomic_t sd_log_blks_free; | 800 | atomic_t sd_log_blks_free; |
| 801 | atomic_t sd_log_blks_needed; | ||
| 797 | wait_queue_head_t sd_log_waitq; | 802 | wait_queue_head_t sd_log_waitq; |
| 798 | wait_queue_head_t sd_logd_waitq; | 803 | wait_queue_head_t sd_logd_waitq; |
| 799 | 804 | ||
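The incore.h hunk above folds the three one-bit bitfields of struct gfs2_trans into a single tr_flags word addressed by the TR_* constants, presumably so the bits can be changed with atomic bitops; adjacent bitfields share a storage unit and cannot be updated independently without external serialisation (the trans.c hunk later in this patch sets TR_TOUCHED before taking gfs2_log_lock, for instance). A tiny sketch of the new accessors; example_touch() itself is hypothetical:

static bool example_touch(struct gfs2_trans *tr)
{
        set_bit(TR_TOUCHED, &tr->tr_flags);             /* was: tr->tr_touched = 1 */
        return test_bit(TR_ATTACHED, &tr->tr_flags);    /* was: tr->tr_attached */
}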
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 27c00a16def0..f865b96374df 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
| @@ -349,6 +349,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) | |||
| 349 | if (gfs2_assert_warn(sdp, blks) || | 349 | if (gfs2_assert_warn(sdp, blks) || |
| 350 | gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks)) | 350 | gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks)) |
| 351 | return -EINVAL; | 351 | return -EINVAL; |
| 352 | atomic_add(blks, &sdp->sd_log_blks_needed); | ||
| 352 | retry: | 353 | retry: |
| 353 | free_blocks = atomic_read(&sdp->sd_log_blks_free); | 354 | free_blocks = atomic_read(&sdp->sd_log_blks_free); |
| 354 | if (unlikely(free_blocks <= wanted)) { | 355 | if (unlikely(free_blocks <= wanted)) { |
| @@ -370,6 +371,7 @@ retry: | |||
| 370 | wake_up(&sdp->sd_reserving_log_wait); | 371 | wake_up(&sdp->sd_reserving_log_wait); |
| 371 | goto retry; | 372 | goto retry; |
| 372 | } | 373 | } |
| 374 | atomic_sub(blks, &sdp->sd_log_blks_needed); | ||
| 373 | trace_gfs2_log_blocks(sdp, -blks); | 375 | trace_gfs2_log_blocks(sdp, -blks); |
| 374 | 376 | ||
| 375 | /* | 377 | /* |
| @@ -797,7 +799,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, | |||
| 797 | 799 | ||
| 798 | static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) | 800 | static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) |
| 799 | { | 801 | { |
| 800 | WARN_ON_ONCE(old->tr_attached != 1); | 802 | WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); |
| 801 | 803 | ||
| 802 | old->tr_num_buf_new += new->tr_num_buf_new; | 804 | old->tr_num_buf_new += new->tr_num_buf_new; |
| 803 | old->tr_num_databuf_new += new->tr_num_databuf_new; | 805 | old->tr_num_databuf_new += new->tr_num_databuf_new; |
| @@ -821,9 +823,9 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |||
| 821 | if (sdp->sd_log_tr) { | 823 | if (sdp->sd_log_tr) { |
| 822 | gfs2_merge_trans(sdp->sd_log_tr, tr); | 824 | gfs2_merge_trans(sdp->sd_log_tr, tr); |
| 823 | } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { | 825 | } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { |
| 824 | gfs2_assert_withdraw(sdp, tr->tr_alloced); | 826 | gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); |
| 825 | sdp->sd_log_tr = tr; | 827 | sdp->sd_log_tr = tr; |
| 826 | tr->tr_attached = 1; | 828 | set_bit(TR_ATTACHED, &tr->tr_flags); |
| 827 | } | 829 | } |
| 828 | 830 | ||
| 829 | sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm; | 831 | sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm; |
| @@ -891,13 +893,16 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp) | |||
| 891 | 893 | ||
| 892 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) | 894 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
| 893 | { | 895 | { |
| 894 | return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1)); | 896 | return (atomic_read(&sdp->sd_log_pinned) + |
| 897 | atomic_read(&sdp->sd_log_blks_needed) >= | ||
| 898 | atomic_read(&sdp->sd_log_thresh1)); | ||
| 895 | } | 899 | } |
| 896 | 900 | ||
| 897 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | 901 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) |
| 898 | { | 902 | { |
| 899 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | 903 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); |
| 900 | return used_blocks >= atomic_read(&sdp->sd_log_thresh2); | 904 | return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >= |
| 905 | atomic_read(&sdp->sd_log_thresh2); | ||
| 901 | } | 906 | } |
| 902 | 907 | ||
| 903 | /** | 908 | /** |
| @@ -913,12 +918,15 @@ int gfs2_logd(void *data) | |||
| 913 | struct gfs2_sbd *sdp = data; | 918 | struct gfs2_sbd *sdp = data; |
| 914 | unsigned long t = 1; | 919 | unsigned long t = 1; |
| 915 | DEFINE_WAIT(wait); | 920 | DEFINE_WAIT(wait); |
| 921 | bool did_flush; | ||
| 916 | 922 | ||
| 917 | while (!kthread_should_stop()) { | 923 | while (!kthread_should_stop()) { |
| 918 | 924 | ||
| 925 | did_flush = false; | ||
| 919 | if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { | 926 | if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { |
| 920 | gfs2_ail1_empty(sdp); | 927 | gfs2_ail1_empty(sdp); |
| 921 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); | 928 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); |
| 929 | did_flush = true; | ||
| 922 | } | 930 | } |
| 923 | 931 | ||
| 924 | if (gfs2_ail_flush_reqd(sdp)) { | 932 | if (gfs2_ail_flush_reqd(sdp)) { |
| @@ -926,9 +934,10 @@ int gfs2_logd(void *data) | |||
| 926 | gfs2_ail1_wait(sdp); | 934 | gfs2_ail1_wait(sdp); |
| 927 | gfs2_ail1_empty(sdp); | 935 | gfs2_ail1_empty(sdp); |
| 928 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); | 936 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); |
| 937 | did_flush = true; | ||
| 929 | } | 938 | } |
| 930 | 939 | ||
| 931 | if (!gfs2_ail_flush_reqd(sdp)) | 940 | if (!gfs2_ail_flush_reqd(sdp) || did_flush) |
| 932 | wake_up(&sdp->sd_log_waitq); | 941 | wake_up(&sdp->sd_log_waitq); |
| 933 | 942 | ||
| 934 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; | 943 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; |
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 49db8ef13fdf..663ffc135ef3 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
| @@ -292,7 +292,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, | |||
| 292 | wait_on_buffer(bh); | 292 | wait_on_buffer(bh); |
| 293 | if (unlikely(!buffer_uptodate(bh))) { | 293 | if (unlikely(!buffer_uptodate(bh))) { |
| 294 | struct gfs2_trans *tr = current->journal_info; | 294 | struct gfs2_trans *tr = current->journal_info; |
| 295 | if (tr && tr->tr_touched) | 295 | if (tr && test_bit(TR_TOUCHED, &tr->tr_flags)) |
| 296 | gfs2_io_error_bh(sdp, bh); | 296 | gfs2_io_error_bh(sdp, bh); |
| 297 | brelse(bh); | 297 | brelse(bh); |
| 298 | *bhp = NULL; | 298 | *bhp = NULL; |
| @@ -319,7 +319,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh) | |||
| 319 | 319 | ||
| 320 | if (!buffer_uptodate(bh)) { | 320 | if (!buffer_uptodate(bh)) { |
| 321 | struct gfs2_trans *tr = current->journal_info; | 321 | struct gfs2_trans *tr = current->journal_info; |
| 322 | if (tr && tr->tr_touched) | 322 | if (tr && test_bit(TR_TOUCHED, &tr->tr_flags)) |
| 323 | gfs2_io_error_bh(sdp, bh); | 323 | gfs2_io_error_bh(sdp, bh); |
| 324 | return -EIO; | 324 | return -EIO; |
| 325 | } | 325 | } |
| @@ -345,7 +345,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, int meta) | |||
| 345 | tr->tr_num_buf_rm++; | 345 | tr->tr_num_buf_rm++; |
| 346 | else | 346 | else |
| 347 | tr->tr_num_databuf_rm++; | 347 | tr->tr_num_databuf_rm++; |
| 348 | tr->tr_touched = 1; | 348 | set_bit(TR_TOUCHED, &tr->tr_flags); |
| 349 | was_pinned = 1; | 349 | was_pinned = 1; |
| 350 | brelse(bh); | 350 | brelse(bh); |
| 351 | } | 351 | } |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index a34308df927f..b108e7ba81af 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
| @@ -683,6 +683,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) | |||
| 683 | goto fail_jindex; | 683 | goto fail_jindex; |
| 684 | } | 684 | } |
| 685 | 685 | ||
| 686 | atomic_set(&sdp->sd_log_blks_needed, 0); | ||
| 686 | if (sdp->sd_args.ar_spectator) { | 687 | if (sdp->sd_args.ar_spectator) { |
| 687 | sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0); | 688 | sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0); |
| 688 | atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); | 689 | atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks); |
| @@ -1226,7 +1227,7 @@ static int set_gfs2_super(struct super_block *s, void *data) | |||
| 1226 | * We set the bdi here to the queue backing, file systems can | 1227 | * We set the bdi here to the queue backing, file systems can |
| 1227 | * overwrite this in ->fill_super() | 1228 | * overwrite this in ->fill_super() |
| 1228 | */ | 1229 | */ |
| 1229 | s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; | 1230 | s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info; |
| 1230 | return 0; | 1231 | return 0; |
| 1231 | } | 1232 | } |
| 1232 | 1233 | ||
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 0c1bde395062..affef3c066e0 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c | |||
| @@ -48,7 +48,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, | |||
| 48 | tr->tr_blocks = blocks; | 48 | tr->tr_blocks = blocks; |
| 49 | tr->tr_revokes = revokes; | 49 | tr->tr_revokes = revokes; |
| 50 | tr->tr_reserved = 1; | 50 | tr->tr_reserved = 1; |
| 51 | tr->tr_alloced = 1; | 51 | set_bit(TR_ALLOCED, &tr->tr_flags); |
| 52 | if (blocks) | 52 | if (blocks) |
| 53 | tr->tr_reserved += 6 + blocks; | 53 | tr->tr_reserved += 6 + blocks; |
| 54 | if (revokes) | 54 | if (revokes) |
| @@ -78,7 +78,8 @@ static void gfs2_print_trans(const struct gfs2_trans *tr) | |||
| 78 | { | 78 | { |
| 79 | pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip); | 79 | pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip); |
| 80 | pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n", | 80 | pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n", |
| 81 | tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr->tr_touched); | 81 | tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, |
| 82 | test_bit(TR_TOUCHED, &tr->tr_flags)); | ||
| 82 | pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n", | 83 | pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n", |
| 83 | tr->tr_num_buf_new, tr->tr_num_buf_rm, | 84 | tr->tr_num_buf_new, tr->tr_num_buf_rm, |
| 84 | tr->tr_num_databuf_new, tr->tr_num_databuf_rm, | 85 | tr->tr_num_databuf_new, tr->tr_num_databuf_rm, |
| @@ -89,12 +90,12 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) | |||
| 89 | { | 90 | { |
| 90 | struct gfs2_trans *tr = current->journal_info; | 91 | struct gfs2_trans *tr = current->journal_info; |
| 91 | s64 nbuf; | 92 | s64 nbuf; |
| 92 | int alloced = tr->tr_alloced; | 93 | int alloced = test_bit(TR_ALLOCED, &tr->tr_flags); |
| 93 | 94 | ||
| 94 | BUG_ON(!tr); | 95 | BUG_ON(!tr); |
| 95 | current->journal_info = NULL; | 96 | current->journal_info = NULL; |
| 96 | 97 | ||
| 97 | if (!tr->tr_touched) { | 98 | if (!test_bit(TR_TOUCHED, &tr->tr_flags)) { |
| 98 | gfs2_log_release(sdp, tr->tr_reserved); | 99 | gfs2_log_release(sdp, tr->tr_reserved); |
| 99 | if (alloced) { | 100 | if (alloced) { |
| 100 | kfree(tr); | 101 | kfree(tr); |
| @@ -112,8 +113,8 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) | |||
| 112 | gfs2_print_trans(tr); | 113 | gfs2_print_trans(tr); |
| 113 | 114 | ||
| 114 | gfs2_log_commit(sdp, tr); | 115 | gfs2_log_commit(sdp, tr); |
| 115 | if (alloced && !tr->tr_attached) | 116 | if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags)) |
| 116 | kfree(tr); | 117 | kfree(tr); |
| 117 | up_read(&sdp->sd_log_flush_lock); | 118 | up_read(&sdp->sd_log_flush_lock); |
| 118 | 119 | ||
| 119 | if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) | 120 | if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) |
| @@ -169,6 +170,10 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) | |||
| 169 | } | 170 | } |
| 170 | 171 | ||
| 171 | lock_buffer(bh); | 172 | lock_buffer(bh); |
| 173 | if (buffer_pinned(bh)) { | ||
| 174 | set_bit(TR_TOUCHED, &tr->tr_flags); | ||
| 175 | goto out; | ||
| 176 | } | ||
| 172 | gfs2_log_lock(sdp); | 177 | gfs2_log_lock(sdp); |
| 173 | bd = bh->b_private; | 178 | bd = bh->b_private; |
| 174 | if (bd == NULL) { | 179 | if (bd == NULL) { |
| @@ -182,7 +187,7 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) | |||
| 182 | gfs2_log_lock(sdp); | 187 | gfs2_log_lock(sdp); |
| 183 | } | 188 | } |
| 184 | gfs2_assert(sdp, bd->bd_gl == gl); | 189 | gfs2_assert(sdp, bd->bd_gl == gl); |
| 185 | tr->tr_touched = 1; | 190 | set_bit(TR_TOUCHED, &tr->tr_flags); |
| 186 | if (list_empty(&bd->bd_list)) { | 191 | if (list_empty(&bd->bd_list)) { |
| 187 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); | 192 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); |
| 188 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); | 193 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); |
| @@ -191,45 +196,24 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) | |||
| 191 | list_add_tail(&bd->bd_list, &tr->tr_databuf); | 196 | list_add_tail(&bd->bd_list, &tr->tr_databuf); |
| 192 | } | 197 | } |
| 193 | gfs2_log_unlock(sdp); | 198 | gfs2_log_unlock(sdp); |
| 199 | out: | ||
| 194 | unlock_buffer(bh); | 200 | unlock_buffer(bh); |
| 195 | } | 201 | } |
| 196 | 202 | ||
| 197 | static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) | ||
| 198 | { | ||
| 199 | struct gfs2_meta_header *mh; | ||
| 200 | struct gfs2_trans *tr; | ||
| 201 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); | ||
| 202 | |||
| 203 | tr = current->journal_info; | ||
| 204 | tr->tr_touched = 1; | ||
| 205 | if (!list_empty(&bd->bd_list)) | ||
| 206 | return; | ||
| 207 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); | ||
| 208 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); | ||
| 209 | mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; | ||
| 210 | if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) { | ||
| 211 | pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n", | ||
| 212 | (unsigned long long)bd->bd_bh->b_blocknr); | ||
| 213 | BUG(); | ||
| 214 | } | ||
| 215 | if (unlikely(state == SFS_FROZEN)) { | ||
| 216 | printk(KERN_INFO "GFS2:adding buf while frozen\n"); | ||
| 217 | gfs2_assert_withdraw(sdp, 0); | ||
| 218 | } | ||
| 219 | gfs2_pin(sdp, bd->bd_bh); | ||
| 220 | mh->__pad0 = cpu_to_be64(0); | ||
| 221 | mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); | ||
| 222 | list_add(&bd->bd_list, &tr->tr_buf); | ||
| 223 | tr->tr_num_buf_new++; | ||
| 224 | } | ||
| 225 | |||
| 226 | void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) | 203 | void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) |
| 227 | { | 204 | { |
| 228 | 205 | ||
| 229 | struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; | 206 | struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; |
| 230 | struct gfs2_bufdata *bd; | 207 | struct gfs2_bufdata *bd; |
| 208 | struct gfs2_meta_header *mh; | ||
| 209 | struct gfs2_trans *tr = current->journal_info; | ||
| 210 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); | ||
| 231 | 211 | ||
| 232 | lock_buffer(bh); | 212 | lock_buffer(bh); |
| 213 | if (buffer_pinned(bh)) { | ||
| 214 | set_bit(TR_TOUCHED, &tr->tr_flags); | ||
| 215 | goto out; | ||
| 216 | } | ||
| 233 | gfs2_log_lock(sdp); | 217 | gfs2_log_lock(sdp); |
| 234 | bd = bh->b_private; | 218 | bd = bh->b_private; |
| 235 | if (bd == NULL) { | 219 | if (bd == NULL) { |
| @@ -245,8 +229,29 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) | |||
| 245 | gfs2_log_lock(sdp); | 229 | gfs2_log_lock(sdp); |
| 246 | } | 230 | } |
| 247 | gfs2_assert(sdp, bd->bd_gl == gl); | 231 | gfs2_assert(sdp, bd->bd_gl == gl); |
| 248 | meta_lo_add(sdp, bd); | 232 | set_bit(TR_TOUCHED, &tr->tr_flags); |
| 233 | if (!list_empty(&bd->bd_list)) | ||
| 234 | goto out_unlock; | ||
| 235 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); | ||
| 236 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); | ||
| 237 | mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; | ||
| 238 | if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) { | ||
| 239 | pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n", | ||
| 240 | (unsigned long long)bd->bd_bh->b_blocknr); | ||
| 241 | BUG(); | ||
| 242 | } | ||
| 243 | if (unlikely(state == SFS_FROZEN)) { | ||
| 244 | printk(KERN_INFO "GFS2:adding buf while frozen\n"); | ||
| 245 | gfs2_assert_withdraw(sdp, 0); | ||
| 246 | } | ||
| 247 | gfs2_pin(sdp, bd->bd_bh); | ||
| 248 | mh->__pad0 = cpu_to_be64(0); | ||
| 249 | mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); | ||
| 250 | list_add(&bd->bd_list, &tr->tr_buf); | ||
| 251 | tr->tr_num_buf_new++; | ||
| 252 | out_unlock: | ||
| 249 | gfs2_log_unlock(sdp); | 253 | gfs2_log_unlock(sdp); |
| 254 | out: | ||
| 250 | unlock_buffer(bh); | 255 | unlock_buffer(bh); |
| 251 | } | 256 | } |
| 252 | 257 | ||
| @@ -256,7 +261,7 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) | |||
| 256 | 261 | ||
| 257 | BUG_ON(!list_empty(&bd->bd_list)); | 262 | BUG_ON(!list_empty(&bd->bd_list)); |
| 258 | gfs2_add_revoke(sdp, bd); | 263 | gfs2_add_revoke(sdp, bd); |
| 259 | tr->tr_touched = 1; | 264 | set_bit(TR_TOUCHED, &tr->tr_flags); |
| 260 | tr->tr_num_revoke++; | 265 | tr->tr_num_revoke++; |
| 261 | } | 266 | } |
| 262 | 267 | ||
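
Note on the gfs2/trans.c hunks above: the separate tr_touched/tr_alloced/tr_attached integers become bit numbers (TR_TOUCHED, TR_ALLOCED, TR_ATTACHED) set and tested atomically in a single tr_flags word. A minimal sketch of that pattern, assuming illustrative struct and bit definitions (the real ones live in fs/gfs2/incore.h, which this diff does not show):

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Illustrative bit numbers; the real defines are in fs/gfs2/incore.h. */
    enum { TR_TOUCHED, TR_ATTACHED, TR_ALLOCED };

    struct demo_trans {
            unsigned long tr_flags;         /* replaces three separate int fields */
    };

    static void demo_mark_touched(struct demo_trans *tr)
    {
            set_bit(TR_TOUCHED, &tr->tr_flags);     /* atomic, unlike "tr->tr_touched = 1" */
    }

    static bool demo_was_touched(struct demo_trans *tr)
    {
            return test_bit(TR_TOUCHED, &tr->tr_flags);
    }
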
diff --git a/fs/iomap.c b/fs/iomap.c index 354a123f170e..a51cb4c07d4d 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
| @@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, | |||
| 114 | 114 | ||
| 115 | BUG_ON(pos + len > iomap->offset + iomap->length); | 115 | BUG_ON(pos + len > iomap->offset + iomap->length); |
| 116 | 116 | ||
| 117 | if (fatal_signal_pending(current)) | ||
| 118 | return -EINTR; | ||
| 119 | |||
| 117 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); | 120 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); |
| 118 | if (!page) | 121 | if (!page) |
| 119 | return -ENOMEM; | 122 | return -ENOMEM; |
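
The single iomap.c hunk makes iomap_write_begin() bail out with -EINTR once the writing task has a fatal signal pending, instead of continuing to grab page-cache pages for a process that is already being killed. A hedged sketch of the same guard in a generic write-begin helper (demo_write_begin is an illustrative name, not an iomap function):

    #include <linux/sched/signal.h>     /* current, fatal_signal_pending(); <linux/sched.h> on older trees */
    #include <linux/errno.h>

    static int demo_write_begin(void)
    {
            /* Stop early: a fatally signalled task (e.g. OOM-killed) should
             * not keep allocating and locking pages it will never use. */
            if (fatal_signal_pending(current))
                    return -EINTR;

            /* ... grab_cache_page_write_begin() and friends would go here ... */
            return 0;
    }
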
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 8c514367ba5a..b6b194ec1b4f 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
| @@ -393,7 +393,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) | |||
| 393 | /* Do we need to erase the effects of a prior jbd2_journal_flush? */ | 393 | /* Do we need to erase the effects of a prior jbd2_journal_flush? */ |
| 394 | if (journal->j_flags & JBD2_FLUSHED) { | 394 | if (journal->j_flags & JBD2_FLUSHED) { |
| 395 | jbd_debug(3, "super block updated\n"); | 395 | jbd_debug(3, "super block updated\n"); |
| 396 | mutex_lock(&journal->j_checkpoint_mutex); | 396 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 397 | /* | 397 | /* |
| 398 | * We hold j_checkpoint_mutex so tail cannot change under us. | 398 | * We hold j_checkpoint_mutex so tail cannot change under us. |
| 399 | * We don't need any special data guarantees for writing sb | 399 | * We don't need any special data guarantees for writing sb |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a097048ed1a3..a1a359bfcc9c 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -276,11 +276,11 @@ loop: | |||
| 276 | goto loop; | 276 | goto loop; |
| 277 | 277 | ||
| 278 | end_loop: | 278 | end_loop: |
| 279 | write_unlock(&journal->j_state_lock); | ||
| 280 | del_timer_sync(&journal->j_commit_timer); | 279 | del_timer_sync(&journal->j_commit_timer); |
| 281 | journal->j_task = NULL; | 280 | journal->j_task = NULL; |
| 282 | wake_up(&journal->j_wait_done_commit); | 281 | wake_up(&journal->j_wait_done_commit); |
| 283 | jbd_debug(1, "Journal thread exiting.\n"); | 282 | jbd_debug(1, "Journal thread exiting.\n"); |
| 283 | write_unlock(&journal->j_state_lock); | ||
| 284 | return 0; | 284 | return 0; |
| 285 | } | 285 | } |
| 286 | 286 | ||
| @@ -944,7 +944,7 @@ out: | |||
| 944 | */ | 944 | */ |
| 945 | void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) | 945 | void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block) |
| 946 | { | 946 | { |
| 947 | mutex_lock(&journal->j_checkpoint_mutex); | 947 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 948 | if (tid_gt(tid, journal->j_tail_sequence)) | 948 | if (tid_gt(tid, journal->j_tail_sequence)) |
| 949 | __jbd2_update_log_tail(journal, tid, block); | 949 | __jbd2_update_log_tail(journal, tid, block); |
| 950 | mutex_unlock(&journal->j_checkpoint_mutex); | 950 | mutex_unlock(&journal->j_checkpoint_mutex); |
| @@ -1304,7 +1304,7 @@ static int journal_reset(journal_t *journal) | |||
| 1304 | journal->j_flags |= JBD2_FLUSHED; | 1304 | journal->j_flags |= JBD2_FLUSHED; |
| 1305 | } else { | 1305 | } else { |
| 1306 | /* Lock here to make assertions happy... */ | 1306 | /* Lock here to make assertions happy... */ |
| 1307 | mutex_lock(&journal->j_checkpoint_mutex); | 1307 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 1308 | /* | 1308 | /* |
| 1309 | * Update log tail information. We use REQ_FUA since new | 1309 | * Update log tail information. We use REQ_FUA since new |
| 1310 | * transaction will start reusing journal space and so we | 1310 | * transaction will start reusing journal space and so we |
| @@ -1691,7 +1691,7 @@ int jbd2_journal_destroy(journal_t *journal) | |||
| 1691 | spin_lock(&journal->j_list_lock); | 1691 | spin_lock(&journal->j_list_lock); |
| 1692 | while (journal->j_checkpoint_transactions != NULL) { | 1692 | while (journal->j_checkpoint_transactions != NULL) { |
| 1693 | spin_unlock(&journal->j_list_lock); | 1693 | spin_unlock(&journal->j_list_lock); |
| 1694 | mutex_lock(&journal->j_checkpoint_mutex); | 1694 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 1695 | err = jbd2_log_do_checkpoint(journal); | 1695 | err = jbd2_log_do_checkpoint(journal); |
| 1696 | mutex_unlock(&journal->j_checkpoint_mutex); | 1696 | mutex_unlock(&journal->j_checkpoint_mutex); |
| 1697 | /* | 1697 | /* |
| @@ -1713,7 +1713,7 @@ int jbd2_journal_destroy(journal_t *journal) | |||
| 1713 | 1713 | ||
| 1714 | if (journal->j_sb_buffer) { | 1714 | if (journal->j_sb_buffer) { |
| 1715 | if (!is_journal_aborted(journal)) { | 1715 | if (!is_journal_aborted(journal)) { |
| 1716 | mutex_lock(&journal->j_checkpoint_mutex); | 1716 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 1717 | 1717 | ||
| 1718 | write_lock(&journal->j_state_lock); | 1718 | write_lock(&journal->j_state_lock); |
| 1719 | journal->j_tail_sequence = | 1719 | journal->j_tail_sequence = |
| @@ -1955,7 +1955,7 @@ int jbd2_journal_flush(journal_t *journal) | |||
| 1955 | spin_lock(&journal->j_list_lock); | 1955 | spin_lock(&journal->j_list_lock); |
| 1956 | while (!err && journal->j_checkpoint_transactions != NULL) { | 1956 | while (!err && journal->j_checkpoint_transactions != NULL) { |
| 1957 | spin_unlock(&journal->j_list_lock); | 1957 | spin_unlock(&journal->j_list_lock); |
| 1958 | mutex_lock(&journal->j_checkpoint_mutex); | 1958 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 1959 | err = jbd2_log_do_checkpoint(journal); | 1959 | err = jbd2_log_do_checkpoint(journal); |
| 1960 | mutex_unlock(&journal->j_checkpoint_mutex); | 1960 | mutex_unlock(&journal->j_checkpoint_mutex); |
| 1961 | spin_lock(&journal->j_list_lock); | 1961 | spin_lock(&journal->j_list_lock); |
| @@ -1965,7 +1965,7 @@ int jbd2_journal_flush(journal_t *journal) | |||
| 1965 | if (is_journal_aborted(journal)) | 1965 | if (is_journal_aborted(journal)) |
| 1966 | return -EIO; | 1966 | return -EIO; |
| 1967 | 1967 | ||
| 1968 | mutex_lock(&journal->j_checkpoint_mutex); | 1968 | mutex_lock_io(&journal->j_checkpoint_mutex); |
| 1969 | if (!err) { | 1969 | if (!err) { |
| 1970 | err = jbd2_cleanup_journal_tail(journal); | 1970 | err = jbd2_cleanup_journal_tail(journal); |
| 1971 | if (err < 0) { | 1971 | if (err < 0) { |
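
All of the jbd2 j_checkpoint_mutex acquisitions above switch from mutex_lock() to mutex_lock_io(), which sleeps the same way but accounts the wait as iowait, since checkpointing is IO-bound work. A minimal sketch of the call pattern (checkpoint_mutex and demo_checkpoint are illustrative stand-ins, not jbd2 symbols):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(checkpoint_mutex);  /* stand-in for journal->j_checkpoint_mutex */

    static void demo_checkpoint(void)
    {
            /* Same locking semantics as mutex_lock(), but the blocked task is
             * marked as in iowait so the wait shows up in IO-wait accounting. */
            mutex_lock_io(&checkpoint_mutex);
            /* ... flush and checkpoint journal buffers ... */
            mutex_unlock(&checkpoint_mutex);
    }
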
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e1652665bd93..5e659ee08d6a 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
| @@ -1863,7 +1863,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) | |||
| 1863 | 1863 | ||
| 1864 | __blist_del_buffer(list, jh); | 1864 | __blist_del_buffer(list, jh); |
| 1865 | jh->b_jlist = BJ_None; | 1865 | jh->b_jlist = BJ_None; |
| 1866 | if (test_clear_buffer_jbddirty(bh)) | 1866 | if (transaction && is_journal_aborted(transaction->t_journal)) |
| 1867 | clear_buffer_jbddirty(bh); | ||
| 1868 | else if (test_clear_buffer_jbddirty(bh)) | ||
| 1867 | mark_buffer_dirty(bh); /* Expose it to the VM */ | 1869 | mark_buffer_dirty(bh); /* Expose it to the VM */ |
| 1868 | } | 1870 | } |
| 1869 | 1871 | ||
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index cf4c636ff4da..439b946c4808 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c | |||
| @@ -41,6 +41,9 @@ static bool kernfs_lockdep(struct kernfs_node *kn) | |||
| 41 | 41 | ||
| 42 | static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen) | 42 | static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen) |
| 43 | { | 43 | { |
| 44 | if (!kn) | ||
| 45 | return strlcpy(buf, "(null)", buflen); | ||
| 46 | |||
| 44 | return strlcpy(buf, kn->parent ? kn->name : "/", buflen); | 47 | return strlcpy(buf, kn->parent ? kn->name : "/", buflen); |
| 45 | } | 48 | } |
| 46 | 49 | ||
| @@ -110,6 +113,8 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a, | |||
| 110 | * kn_to: /n1/n2/n3 [depth=3] | 113 | * kn_to: /n1/n2/n3 [depth=3] |
| 111 | * result: /../.. | 114 | * result: /../.. |
| 112 | * | 115 | * |
| 116 | * [3] when @kn_to is NULL result will be "(null)" | ||
| 117 | * | ||
| 113 | * Returns the length of the full path. If the full length is equal to or | 118 | * Returns the length of the full path. If the full length is equal to or |
| 114 | * greater than @buflen, @buf contains the truncated path with the trailing | 119 | * greater than @buflen, @buf contains the truncated path with the trailing |
| 115 | * '\0'. On error, -errno is returned. | 120 | * '\0'. On error, -errno is returned. |
| @@ -123,6 +128,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to, | |||
| 123 | size_t depth_from, depth_to, len = 0; | 128 | size_t depth_from, depth_to, len = 0; |
| 124 | int i, j; | 129 | int i, j; |
| 125 | 130 | ||
| 131 | if (!kn_to) | ||
| 132 | return strlcpy(buf, "(null)", buflen); | ||
| 133 | |||
| 126 | if (!kn_from) | 134 | if (!kn_from) |
| 127 | kn_from = kernfs_root(kn_to)->kn; | 135 | kn_from = kernfs_root(kn_to)->kn; |
| 128 | 136 | ||
| @@ -166,6 +174,8 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to, | |||
| 166 | * similar to strlcpy(). It returns the length of @kn's name and if @buf | 174 | * similar to strlcpy(). It returns the length of @kn's name and if @buf |
| 167 | * isn't long enough, it's filled upto @buflen-1 and nul terminated. | 175 | * isn't long enough, it's filled upto @buflen-1 and nul terminated. |
| 168 | * | 176 | * |
| 177 | * Fills buffer with "(null)" if @kn is NULL. | ||
| 178 | * | ||
| 169 | * This function can be called from any context. | 179 | * This function can be called from any context. |
| 170 | */ | 180 | */ |
| 171 | int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) | 181 | int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) |
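
The kernfs hunks above make kernfs_name() and kernfs_path_from_node() tolerate a NULL node by filling the buffer with "(null)" via strlcpy(), which also reports the length it would have copied. A tiny sketch of the guard (demo_node_name is an illustrative helper, not the kernfs API):

    #include <linux/string.h>
    #include <linux/types.h>

    static int demo_node_name(const char *name, char *buf, size_t buflen)
    {
            if (!name)                              /* NULL node: report "(null)" */
                    return strlcpy(buf, "(null)", buflen);
            return strlcpy(buf, name, buflen);      /* strlcpy() returns strlen(src) */
    }
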
diff --git a/fs/libfs.c b/fs/libfs.c index e973cd51f126..28d6f35feed6 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
| @@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name, | |||
| 245 | struct inode *root; | 245 | struct inode *root; |
| 246 | struct qstr d_name = QSTR_INIT(name, strlen(name)); | 246 | struct qstr d_name = QSTR_INIT(name, strlen(name)); |
| 247 | 247 | ||
| 248 | s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL); | 248 | s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER, |
| 249 | &init_user_ns, NULL); | ||
| 249 | if (IS_ERR(s)) | 250 | if (IS_ERR(s)) |
| 250 | return ERR_CAST(s); | 251 | return ERR_CAST(s); |
| 251 | 252 | ||
diff --git a/fs/namespace.c b/fs/namespace.c index b5b1259e064f..487ba30bb5c6 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -742,26 +742,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry) | |||
| 742 | return NULL; | 742 | return NULL; |
| 743 | } | 743 | } |
| 744 | 744 | ||
| 745 | static struct mountpoint *new_mountpoint(struct dentry *dentry) | 745 | static struct mountpoint *get_mountpoint(struct dentry *dentry) |
| 746 | { | 746 | { |
| 747 | struct hlist_head *chain = mp_hash(dentry); | 747 | struct mountpoint *mp, *new = NULL; |
| 748 | struct mountpoint *mp; | ||
| 749 | int ret; | 748 | int ret; |
| 750 | 749 | ||
| 751 | mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); | 750 | if (d_mountpoint(dentry)) { |
| 752 | if (!mp) | 751 | mountpoint: |
| 752 | read_seqlock_excl(&mount_lock); | ||
| 753 | mp = lookup_mountpoint(dentry); | ||
| 754 | read_sequnlock_excl(&mount_lock); | ||
| 755 | if (mp) | ||
| 756 | goto done; | ||
| 757 | } | ||
| 758 | |||
| 759 | if (!new) | ||
| 760 | new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); | ||
| 761 | if (!new) | ||
| 753 | return ERR_PTR(-ENOMEM); | 762 | return ERR_PTR(-ENOMEM); |
| 754 | 763 | ||
| 764 | |||
| 765 | /* Exactly one process may set d_mounted */ | ||
| 755 | ret = d_set_mounted(dentry); | 766 | ret = d_set_mounted(dentry); |
| 756 | if (ret) { | ||
| 757 | kfree(mp); | ||
| 758 | return ERR_PTR(ret); | ||
| 759 | } | ||
| 760 | 767 | ||
| 761 | mp->m_dentry = dentry; | 768 | /* Someone else set d_mounted? */ |
| 762 | mp->m_count = 1; | 769 | if (ret == -EBUSY) |
| 763 | hlist_add_head(&mp->m_hash, chain); | 770 | goto mountpoint; |
| 764 | INIT_HLIST_HEAD(&mp->m_list); | 771 | |
| 772 | /* The dentry is not available as a mountpoint? */ | ||
| 773 | mp = ERR_PTR(ret); | ||
| 774 | if (ret) | ||
| 775 | goto done; | ||
| 776 | |||
| 777 | /* Add the new mountpoint to the hash table */ | ||
| 778 | read_seqlock_excl(&mount_lock); | ||
| 779 | new->m_dentry = dentry; | ||
| 780 | new->m_count = 1; | ||
| 781 | hlist_add_head(&new->m_hash, mp_hash(dentry)); | ||
| 782 | INIT_HLIST_HEAD(&new->m_list); | ||
| 783 | read_sequnlock_excl(&mount_lock); | ||
| 784 | |||
| 785 | mp = new; | ||
| 786 | new = NULL; | ||
| 787 | done: | ||
| 788 | kfree(new); | ||
| 765 | return mp; | 789 | return mp; |
| 766 | } | 790 | } |
| 767 | 791 | ||
| @@ -1595,11 +1619,11 @@ void __detach_mounts(struct dentry *dentry) | |||
| 1595 | struct mount *mnt; | 1619 | struct mount *mnt; |
| 1596 | 1620 | ||
| 1597 | namespace_lock(); | 1621 | namespace_lock(); |
| 1622 | lock_mount_hash(); | ||
| 1598 | mp = lookup_mountpoint(dentry); | 1623 | mp = lookup_mountpoint(dentry); |
| 1599 | if (IS_ERR_OR_NULL(mp)) | 1624 | if (IS_ERR_OR_NULL(mp)) |
| 1600 | goto out_unlock; | 1625 | goto out_unlock; |
| 1601 | 1626 | ||
| 1602 | lock_mount_hash(); | ||
| 1603 | event++; | 1627 | event++; |
| 1604 | while (!hlist_empty(&mp->m_list)) { | 1628 | while (!hlist_empty(&mp->m_list)) { |
| 1605 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); | 1629 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); |
| @@ -1609,9 +1633,9 @@ void __detach_mounts(struct dentry *dentry) | |||
| 1609 | } | 1633 | } |
| 1610 | else umount_tree(mnt, UMOUNT_CONNECTED); | 1634 | else umount_tree(mnt, UMOUNT_CONNECTED); |
| 1611 | } | 1635 | } |
| 1612 | unlock_mount_hash(); | ||
| 1613 | put_mountpoint(mp); | 1636 | put_mountpoint(mp); |
| 1614 | out_unlock: | 1637 | out_unlock: |
| 1638 | unlock_mount_hash(); | ||
| 1615 | namespace_unlock(); | 1639 | namespace_unlock(); |
| 1616 | } | 1640 | } |
| 1617 | 1641 | ||
| @@ -2038,9 +2062,7 @@ retry: | |||
| 2038 | namespace_lock(); | 2062 | namespace_lock(); |
| 2039 | mnt = lookup_mnt(path); | 2063 | mnt = lookup_mnt(path); |
| 2040 | if (likely(!mnt)) { | 2064 | if (likely(!mnt)) { |
| 2041 | struct mountpoint *mp = lookup_mountpoint(dentry); | 2065 | struct mountpoint *mp = get_mountpoint(dentry); |
| 2042 | if (!mp) | ||
| 2043 | mp = new_mountpoint(dentry); | ||
| 2044 | if (IS_ERR(mp)) { | 2066 | if (IS_ERR(mp)) { |
| 2045 | namespace_unlock(); | 2067 | namespace_unlock(); |
| 2046 | inode_unlock(dentry->d_inode); | 2068 | inode_unlock(dentry->d_inode); |
| @@ -2059,7 +2081,11 @@ retry: | |||
| 2059 | static void unlock_mount(struct mountpoint *where) | 2081 | static void unlock_mount(struct mountpoint *where) |
| 2060 | { | 2082 | { |
| 2061 | struct dentry *dentry = where->m_dentry; | 2083 | struct dentry *dentry = where->m_dentry; |
| 2084 | |||
| 2085 | read_seqlock_excl(&mount_lock); | ||
| 2062 | put_mountpoint(where); | 2086 | put_mountpoint(where); |
| 2087 | read_sequnlock_excl(&mount_lock); | ||
| 2088 | |||
| 2063 | namespace_unlock(); | 2089 | namespace_unlock(); |
| 2064 | inode_unlock(dentry->d_inode); | 2090 | inode_unlock(dentry->d_inode); |
| 2065 | } | 2091 | } |
| @@ -3135,9 +3161,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, | |||
| 3135 | touch_mnt_namespace(current->nsproxy->mnt_ns); | 3161 | touch_mnt_namespace(current->nsproxy->mnt_ns); |
| 3136 | /* A moved mount should not expire automatically */ | 3162 | /* A moved mount should not expire automatically */ |
| 3137 | list_del_init(&new_mnt->mnt_expire); | 3163 | list_del_init(&new_mnt->mnt_expire); |
| 3164 | put_mountpoint(root_mp); | ||
| 3138 | unlock_mount_hash(); | 3165 | unlock_mount_hash(); |
| 3139 | chroot_fs_refs(&root, &new); | 3166 | chroot_fs_refs(&root, &new); |
| 3140 | put_mountpoint(root_mp); | ||
| 3141 | error = 0; | 3167 | error = 0; |
| 3142 | out4: | 3168 | out4: |
| 3143 | unlock_mount(old_mp); | 3169 | unlock_mount(old_mp); |
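
The namespace.c change folds lookup_mountpoint() and new_mountpoint() into one get_mountpoint() that first looks for an existing mountpoint, then tries to claim the dentry with d_set_mounted(), and retries the lookup if -EBUSY says another task won the race; hash insertion and put_mountpoint() now run under read_seqlock_excl(&mount_lock). A hedged, simplified sketch of that lookup-or-create-with-retry control flow using illustrative helpers (demo_lookup, demo_claim, demo_insert are placeholders, not the kernel functions, and the mount_lock locking is omitted):

    #include <linux/slab.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    struct demo_mp { void *key; int count; };

    /* Placeholder helpers standing in for lookup_mountpoint(), d_set_mounted()
     * and the hash insertion. */
    static struct demo_mp *demo_lookup(void *key) { return NULL; }
    static int demo_claim(void *key) { return 0; }
    static void demo_insert(struct demo_mp *mp, void *key) { mp->key = key; mp->count = 1; }

    static struct demo_mp *demo_get(void *key)
    {
            struct demo_mp *mp, *new = NULL;
            int ret;
    again:
            mp = demo_lookup(key);          /* reuse an existing entry if there is one */
            if (mp)
                    goto done;
            if (!new)
                    new = kmalloc(sizeof(*new), GFP_KERNEL);
            if (!new)
                    return ERR_PTR(-ENOMEM);
            ret = demo_claim(key);          /* exactly one task may succeed */
            if (ret == -EBUSY)
                    goto again;             /* lost the race: pick up the winner's entry */
            if (ret) {
                    mp = ERR_PTR(ret);
                    goto done;
            }
            demo_insert(new, key);
            mp = new;
            new = NULL;
    done:
            kfree(new);                     /* kfree(NULL) is a no-op */
            return mp;
    }
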
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6dcbc5defb7a..0a0eaecf9676 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
| 39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
| 40 | #include <linux/errno.h> | 40 | #include <linux/errno.h> |
| 41 | #include <linux/file.h> | ||
| 42 | #include <linux/string.h> | 41 | #include <linux/string.h> |
| 43 | #include <linux/ratelimit.h> | 42 | #include <linux/ratelimit.h> |
| 44 | #include <linux/printk.h> | 43 | #include <linux/printk.h> |
| @@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt, | |||
| 1083 | return nfs4_call_sync_sequence(clnt, server, msg, args, res); | 1082 | return nfs4_call_sync_sequence(clnt, server, msg, args, res); |
| 1084 | } | 1083 | } |
| 1085 | 1084 | ||
| 1086 | static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) | 1085 | static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, |
| 1086 | unsigned long timestamp) | ||
| 1087 | { | 1087 | { |
| 1088 | struct nfs_inode *nfsi = NFS_I(dir); | 1088 | struct nfs_inode *nfsi = NFS_I(dir); |
| 1089 | 1089 | ||
| @@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) | |||
| 1099 | NFS_INO_INVALID_ACL; | 1099 | NFS_INO_INVALID_ACL; |
| 1100 | } | 1100 | } |
| 1101 | dir->i_version = cinfo->after; | 1101 | dir->i_version = cinfo->after; |
| 1102 | nfsi->read_cache_jiffies = timestamp; | ||
| 1102 | nfsi->attr_gencount = nfs_inc_attr_generation_counter(); | 1103 | nfsi->attr_gencount = nfs_inc_attr_generation_counter(); |
| 1103 | nfs_fscache_invalidate(dir); | 1104 | nfs_fscache_invalidate(dir); |
| 1104 | spin_unlock(&dir->i_lock); | 1105 | spin_unlock(&dir->i_lock); |
| @@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) | |||
| 2391 | nfs_fattr_map_and_free_names(server, &data->f_attr); | 2392 | nfs_fattr_map_and_free_names(server, &data->f_attr); |
| 2392 | 2393 | ||
| 2393 | if (o_arg->open_flags & O_CREAT) { | 2394 | if (o_arg->open_flags & O_CREAT) { |
| 2394 | update_changeattr(dir, &o_res->cinfo); | ||
| 2395 | if (o_arg->open_flags & O_EXCL) | 2395 | if (o_arg->open_flags & O_EXCL) |
| 2396 | data->file_created = 1; | 2396 | data->file_created = 1; |
| 2397 | else if (o_res->cinfo.before != o_res->cinfo.after) | 2397 | else if (o_res->cinfo.before != o_res->cinfo.after) |
| 2398 | data->file_created = 1; | 2398 | data->file_created = 1; |
| 2399 | if (data->file_created || dir->i_version != o_res->cinfo.after) | ||
| 2400 | update_changeattr(dir, &o_res->cinfo, | ||
| 2401 | o_res->f_attr->time_start); | ||
| 2399 | } | 2402 | } |
| 2400 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) | 2403 | if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) |
| 2401 | server->caps &= ~NFS_CAP_POSIX_LOCK; | 2404 | server->caps &= ~NFS_CAP_POSIX_LOCK; |
| @@ -2697,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, | |||
| 2697 | sattr->ia_valid |= ATTR_MTIME; | 2700 | sattr->ia_valid |= ATTR_MTIME; |
| 2698 | 2701 | ||
| 2699 | /* Except MODE, it seems harmless of setting twice. */ | 2702 | /* Except MODE, it seems harmless of setting twice. */ |
| 2700 | if ((attrset[1] & FATTR4_WORD1_MODE)) | 2703 | if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE && |
| 2704 | attrset[1] & FATTR4_WORD1_MODE) | ||
| 2701 | sattr->ia_valid &= ~ATTR_MODE; | 2705 | sattr->ia_valid &= ~ATTR_MODE; |
| 2702 | 2706 | ||
| 2703 | if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) | 2707 | if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) |
| @@ -4073,11 +4077,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name) | |||
| 4073 | .rpc_argp = &args, | 4077 | .rpc_argp = &args, |
| 4074 | .rpc_resp = &res, | 4078 | .rpc_resp = &res, |
| 4075 | }; | 4079 | }; |
| 4080 | unsigned long timestamp = jiffies; | ||
| 4076 | int status; | 4081 | int status; |
| 4077 | 4082 | ||
| 4078 | status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); | 4083 | status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); |
| 4079 | if (status == 0) | 4084 | if (status == 0) |
| 4080 | update_changeattr(dir, &res.cinfo); | 4085 | update_changeattr(dir, &res.cinfo, timestamp); |
| 4081 | return status; | 4086 | return status; |
| 4082 | } | 4087 | } |
| 4083 | 4088 | ||
| @@ -4125,7 +4130,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) | |||
| 4125 | if (nfs4_async_handle_error(task, res->server, NULL, | 4130 | if (nfs4_async_handle_error(task, res->server, NULL, |
| 4126 | &data->timeout) == -EAGAIN) | 4131 | &data->timeout) == -EAGAIN) |
| 4127 | return 0; | 4132 | return 0; |
| 4128 | update_changeattr(dir, &res->cinfo); | 4133 | if (task->tk_status == 0) |
| 4134 | update_changeattr(dir, &res->cinfo, res->dir_attr->time_start); | ||
| 4129 | return 1; | 4135 | return 1; |
| 4130 | } | 4136 | } |
| 4131 | 4137 | ||
| @@ -4159,8 +4165,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, | |||
| 4159 | if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) | 4165 | if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) |
| 4160 | return 0; | 4166 | return 0; |
| 4161 | 4167 | ||
| 4162 | update_changeattr(old_dir, &res->old_cinfo); | 4168 | if (task->tk_status == 0) { |
| 4163 | update_changeattr(new_dir, &res->new_cinfo); | 4169 | update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start); |
| 4170 | if (new_dir != old_dir) | ||
| 4171 | update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start); | ||
| 4172 | } | ||
| 4164 | return 1; | 4173 | return 1; |
| 4165 | } | 4174 | } |
| 4166 | 4175 | ||
| @@ -4197,7 +4206,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct | |||
| 4197 | 4206 | ||
| 4198 | status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); | 4207 | status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); |
| 4199 | if (!status) { | 4208 | if (!status) { |
| 4200 | update_changeattr(dir, &res.cinfo); | 4209 | update_changeattr(dir, &res.cinfo, res.fattr->time_start); |
| 4201 | status = nfs_post_op_update_inode(inode, res.fattr); | 4210 | status = nfs_post_op_update_inode(inode, res.fattr); |
| 4202 | if (!status) | 4211 | if (!status) |
| 4203 | nfs_setsecurity(inode, res.fattr, res.label); | 4212 | nfs_setsecurity(inode, res.fattr, res.label); |
| @@ -4272,7 +4281,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_ | |||
| 4272 | int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, | 4281 | int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, |
| 4273 | &data->arg.seq_args, &data->res.seq_res, 1); | 4282 | &data->arg.seq_args, &data->res.seq_res, 1); |
| 4274 | if (status == 0) { | 4283 | if (status == 0) { |
| 4275 | update_changeattr(dir, &data->res.dir_cinfo); | 4284 | update_changeattr(dir, &data->res.dir_cinfo, |
| 4285 | data->res.fattr->time_start); | ||
| 4276 | status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); | 4286 | status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); |
| 4277 | } | 4287 | } |
| 4278 | return status; | 4288 | return status; |
| @@ -6127,7 +6137,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, | |||
| 6127 | p->server = server; | 6137 | p->server = server; |
| 6128 | atomic_inc(&lsp->ls_count); | 6138 | atomic_inc(&lsp->ls_count); |
| 6129 | p->ctx = get_nfs_open_context(ctx); | 6139 | p->ctx = get_nfs_open_context(ctx); |
| 6130 | get_file(fl->fl_file); | ||
| 6131 | memcpy(&p->fl, fl, sizeof(p->fl)); | 6140 | memcpy(&p->fl, fl, sizeof(p->fl)); |
| 6132 | return p; | 6141 | return p; |
| 6133 | out_free_seqid: | 6142 | out_free_seqid: |
| @@ -6240,7 +6249,6 @@ static void nfs4_lock_release(void *calldata) | |||
| 6240 | nfs_free_seqid(data->arg.lock_seqid); | 6249 | nfs_free_seqid(data->arg.lock_seqid); |
| 6241 | nfs4_put_lock_state(data->lsp); | 6250 | nfs4_put_lock_state(data->lsp); |
| 6242 | put_nfs_open_context(data->ctx); | 6251 | put_nfs_open_context(data->ctx); |
| 6243 | fput(data->fl.fl_file); | ||
| 6244 | kfree(data); | 6252 | kfree(data); |
| 6245 | dprintk("%s: done!\n", __func__); | 6253 | dprintk("%s: done!\n", __func__); |
| 6246 | } | 6254 | } |
| @@ -8483,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
| 8483 | goto out; | 8491 | goto out; |
| 8484 | } | 8492 | } |
| 8485 | 8493 | ||
| 8494 | nfs4_sequence_free_slot(&lgp->res.seq_res); | ||
| 8486 | err = nfs4_handle_exception(server, nfs4err, exception); | 8495 | err = nfs4_handle_exception(server, nfs4err, exception); |
| 8487 | if (!status) { | 8496 | if (!status) { |
| 8488 | if (exception->retry) | 8497 | if (exception->retry) |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1d152f4470cd..daeb94e3acd4 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
| @@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | |||
| 1091 | case -NFS4ERR_BADXDR: | 1091 | case -NFS4ERR_BADXDR: |
| 1092 | case -NFS4ERR_RESOURCE: | 1092 | case -NFS4ERR_RESOURCE: |
| 1093 | case -NFS4ERR_NOFILEHANDLE: | 1093 | case -NFS4ERR_NOFILEHANDLE: |
| 1094 | case -NFS4ERR_MOVED: | ||
| 1094 | /* Non-seqid mutating errors */ | 1095 | /* Non-seqid mutating errors */ |
| 1095 | return; | 1096 | return; |
| 1096 | }; | 1097 | }; |
| @@ -1729,7 +1730,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) | |||
| 1729 | break; | 1730 | break; |
| 1730 | case -NFS4ERR_STALE_CLIENTID: | 1731 | case -NFS4ERR_STALE_CLIENTID: |
| 1731 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); | 1732 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); |
| 1732 | nfs4_state_clear_reclaim_reboot(clp); | ||
| 1733 | nfs4_state_start_reclaim_reboot(clp); | 1733 | nfs4_state_start_reclaim_reboot(clp); |
| 1734 | break; | 1734 | break; |
| 1735 | case -NFS4ERR_EXPIRED: | 1735 | case -NFS4ERR_EXPIRED: |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 59554f3adf29..dd042498ce7c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
| @@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino) | |||
| 1200 | 1200 | ||
| 1201 | send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); | 1201 | send = pnfs_prepare_layoutreturn(lo, &stateid, NULL); |
| 1202 | spin_unlock(&ino->i_lock); | 1202 | spin_unlock(&ino->i_lock); |
| 1203 | pnfs_free_lseg_list(&tmp_list); | ||
| 1204 | if (send) | 1203 | if (send) |
| 1205 | status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); | 1204 | status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true); |
| 1206 | out_put_layout_hdr: | 1205 | out_put_layout_hdr: |
| 1206 | pnfs_free_lseg_list(&tmp_list); | ||
| 1207 | pnfs_put_layout_hdr(lo); | 1207 | pnfs_put_layout_hdr(lo); |
| 1208 | out: | 1208 | out: |
| 1209 | dprintk("<-- %s status: %d\n", __func__, status); | 1209 | dprintk("<-- %s status: %d\n", __func__, status); |
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 47febcf99185..20b1c17320d5 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig | |||
| @@ -104,6 +104,7 @@ config NFSD_SCSILAYOUT | |||
| 104 | depends on NFSD_V4 && BLOCK | 104 | depends on NFSD_V4 && BLOCK |
| 105 | select NFSD_PNFS | 105 | select NFSD_PNFS |
| 106 | select EXPORTFS_BLOCK_OPS | 106 | select EXPORTFS_BLOCK_OPS |
| 107 | select BLK_SCSI_REQUEST | ||
| 107 | help | 108 | help |
| 108 | This option enables support for the exporting pNFS SCSI layouts | 109 | This option enables support for the exporting pNFS SCSI layouts |
| 109 | in the kernel's NFS server. The pNFS SCSI layout enables NFS | 110 | in the kernel's NFS server. The pNFS SCSI layout enables NFS |
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 0780ff864539..a06115e31612 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/nfsd/debug.h> | 10 | #include <linux/nfsd/debug.h> |
| 11 | #include <scsi/scsi_proto.h> | 11 | #include <scsi/scsi_proto.h> |
| 12 | #include <scsi/scsi_common.h> | 12 | #include <scsi/scsi_common.h> |
| 13 | #include <scsi/scsi_request.h> | ||
| 13 | 14 | ||
| 14 | #include "blocklayoutxdr.h" | 15 | #include "blocklayoutxdr.h" |
| 15 | #include "pnfs.h" | 16 | #include "pnfs.h" |
| @@ -213,6 +214,7 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev, | |||
| 213 | { | 214 | { |
| 214 | struct request_queue *q = bdev->bd_disk->queue; | 215 | struct request_queue *q = bdev->bd_disk->queue; |
| 215 | struct request *rq; | 216 | struct request *rq; |
| 217 | struct scsi_request *req; | ||
| 216 | size_t bufflen = 252, len, id_len; | 218 | size_t bufflen = 252, len, id_len; |
| 217 | u8 *buf, *d, type, assoc; | 219 | u8 *buf, *d, type, assoc; |
| 218 | int error; | 220 | int error; |
| @@ -221,23 +223,24 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev, | |||
| 221 | if (!buf) | 223 | if (!buf) |
| 222 | return -ENOMEM; | 224 | return -ENOMEM; |
| 223 | 225 | ||
| 224 | rq = blk_get_request(q, READ, GFP_KERNEL); | 226 | rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); |
| 225 | if (IS_ERR(rq)) { | 227 | if (IS_ERR(rq)) { |
| 226 | error = -ENOMEM; | 228 | error = -ENOMEM; |
| 227 | goto out_free_buf; | 229 | goto out_free_buf; |
| 228 | } | 230 | } |
| 229 | blk_rq_set_block_pc(rq); | 231 | req = scsi_req(rq); |
| 232 | scsi_req_init(rq); | ||
| 230 | 233 | ||
| 231 | error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL); | 234 | error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL); |
| 232 | if (error) | 235 | if (error) |
| 233 | goto out_put_request; | 236 | goto out_put_request; |
| 234 | 237 | ||
| 235 | rq->cmd[0] = INQUIRY; | 238 | req->cmd[0] = INQUIRY; |
| 236 | rq->cmd[1] = 1; | 239 | req->cmd[1] = 1; |
| 237 | rq->cmd[2] = 0x83; | 240 | req->cmd[2] = 0x83; |
| 238 | rq->cmd[3] = bufflen >> 8; | 241 | req->cmd[3] = bufflen >> 8; |
| 239 | rq->cmd[4] = bufflen & 0xff; | 242 | req->cmd[4] = bufflen & 0xff; |
| 240 | rq->cmd_len = COMMAND_SIZE(INQUIRY); | 243 | req->cmd_len = COMMAND_SIZE(INQUIRY); |
| 241 | 244 | ||
| 242 | error = blk_execute_rq(rq->q, NULL, rq, 1); | 245 | error = blk_execute_rq(rq->q, NULL, rq, 1); |
| 243 | if (error) { | 246 | if (error) { |
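
The blocklayout.c hunk tracks the block-layer change where SCSI command bytes moved from struct request into struct scsi_request: the request is now allocated with REQ_OP_SCSI_IN, initialised with scsi_req_init(), and the CDB is written through scsi_req(rq). A condensed sketch of that allocation pattern, mirroring only the calls visible in the diff (demo_inquiry is an illustrative wrapper, not the nfsd function):

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <scsi/scsi_proto.h>
    #include <scsi/scsi_common.h>
    #include <scsi/scsi_request.h>

    static int demo_inquiry(struct request_queue *q, void *buf, size_t bufflen)
    {
            struct request *rq;
            struct scsi_request *req;
            int error;

            rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);    /* was READ + blk_rq_set_block_pc() */
            if (IS_ERR(rq))
                    return PTR_ERR(rq);
            req = scsi_req(rq);
            scsi_req_init(rq);

            error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
            if (error)
                    goto out_put;

            req->cmd[0] = INQUIRY;                  /* CDB now lives in struct scsi_request */
            req->cmd[1] = 1;                        /* EVPD */
            req->cmd[2] = 0x83;                     /* device identification VPD page */
            req->cmd[3] = bufflen >> 8;
            req->cmd[4] = bufflen & 0xff;
            req->cmd_len = COMMAND_SIZE(INQUIRY);

            error = blk_execute_rq(rq->q, NULL, rq, 1);
    out_put:
            blk_put_request(rq);
            return error;
    }
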
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 596205d939a1..e122da696f1b 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c | |||
| @@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, | |||
| 223 | struct nfs4_layout_stateid *ls; | 223 | struct nfs4_layout_stateid *ls; |
| 224 | struct nfs4_stid *stp; | 224 | struct nfs4_stid *stp; |
| 225 | 225 | ||
| 226 | stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache); | 226 | stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache, |
| 227 | nfsd4_free_layout_stateid); | ||
| 227 | if (!stp) | 228 | if (!stp) |
| 228 | return NULL; | 229 | return NULL; |
| 229 | stp->sc_free = nfsd4_free_layout_stateid; | 230 | |
| 230 | get_nfs4_file(fp); | 231 | get_nfs4_file(fp); |
| 231 | stp->sc_file = fp; | 232 | stp->sc_file = fp; |
| 232 | 233 | ||
| @@ -613,6 +614,7 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) | |||
| 613 | { | 614 | { |
| 614 | struct nfs4_client *clp = ls->ls_stid.sc_client; | 615 | struct nfs4_client *clp = ls->ls_stid.sc_client; |
| 615 | char addr_str[INET6_ADDRSTRLEN]; | 616 | char addr_str[INET6_ADDRSTRLEN]; |
| 617 | static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed"; | ||
| 616 | static char *envp[] = { | 618 | static char *envp[] = { |
| 617 | "HOME=/", | 619 | "HOME=/", |
| 618 | "TERM=linux", | 620 | "TERM=linux", |
| @@ -628,12 +630,13 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) | |||
| 628 | "nfsd: client %s failed to respond to layout recall. " | 630 | "nfsd: client %s failed to respond to layout recall. " |
| 629 | " Fencing..\n", addr_str); | 631 | " Fencing..\n", addr_str); |
| 630 | 632 | ||
| 631 | argv[0] = "/sbin/nfsd-recall-failed"; | 633 | argv[0] = (char *)nfsd_recall_failed; |
| 632 | argv[1] = addr_str; | 634 | argv[1] = addr_str; |
| 633 | argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id; | 635 | argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id; |
| 634 | argv[3] = NULL; | 636 | argv[3] = NULL; |
| 635 | 637 | ||
| 636 | error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); | 638 | error = call_usermodehelper(nfsd_recall_failed, argv, envp, |
| 639 | UMH_WAIT_PROC); | ||
| 637 | if (error) { | 640 | if (error) { |
| 638 | printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n", | 641 | printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n", |
| 639 | addr_str, error); | 642 | addr_str, error); |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 4b4beaaa4eaa..a0dee8ae9f97 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -633,8 +633,8 @@ out: | |||
| 633 | return co; | 633 | return co; |
| 634 | } | 634 | } |
| 635 | 635 | ||
| 636 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | 636 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, |
| 637 | struct kmem_cache *slab) | 637 | void (*sc_free)(struct nfs4_stid *)) |
| 638 | { | 638 | { |
| 639 | struct nfs4_stid *stid; | 639 | struct nfs4_stid *stid; |
| 640 | int new_id; | 640 | int new_id; |
| @@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | |||
| 650 | idr_preload_end(); | 650 | idr_preload_end(); |
| 651 | if (new_id < 0) | 651 | if (new_id < 0) |
| 652 | goto out_free; | 652 | goto out_free; |
| 653 | |||
| 654 | stid->sc_free = sc_free; | ||
| 653 | stid->sc_client = cl; | 655 | stid->sc_client = cl; |
| 654 | stid->sc_stateid.si_opaque.so_id = new_id; | 656 | stid->sc_stateid.si_opaque.so_id = new_id; |
| 655 | stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; | 657 | stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; |
| @@ -675,15 +677,12 @@ out_free: | |||
| 675 | static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) | 677 | static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) |
| 676 | { | 678 | { |
| 677 | struct nfs4_stid *stid; | 679 | struct nfs4_stid *stid; |
| 678 | struct nfs4_ol_stateid *stp; | ||
| 679 | 680 | ||
| 680 | stid = nfs4_alloc_stid(clp, stateid_slab); | 681 | stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid); |
| 681 | if (!stid) | 682 | if (!stid) |
| 682 | return NULL; | 683 | return NULL; |
| 683 | 684 | ||
| 684 | stp = openlockstateid(stid); | 685 | return openlockstateid(stid); |
| 685 | stp->st_stid.sc_free = nfs4_free_ol_stateid; | ||
| 686 | return stp; | ||
| 687 | } | 686 | } |
| 688 | 687 | ||
| 689 | static void nfs4_free_deleg(struct nfs4_stid *stid) | 688 | static void nfs4_free_deleg(struct nfs4_stid *stid) |
| @@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, | |||
| 781 | goto out_dec; | 780 | goto out_dec; |
| 782 | if (delegation_blocked(¤t_fh->fh_handle)) | 781 | if (delegation_blocked(¤t_fh->fh_handle)) |
| 783 | goto out_dec; | 782 | goto out_dec; |
| 784 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); | 783 | dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg)); |
| 785 | if (dp == NULL) | 784 | if (dp == NULL) |
| 786 | goto out_dec; | 785 | goto out_dec; |
| 787 | 786 | ||
| 788 | dp->dl_stid.sc_free = nfs4_free_deleg; | ||
| 789 | /* | 787 | /* |
| 790 | * delegation seqid's are never incremented. The 4.1 special | 788 | * delegation seqid's are never incremented. The 4.1 special |
| 791 | * meaning of seqid 0 isn't meaningful, really, but let's avoid | 789 | * meaning of seqid 0 isn't meaningful, really, but let's avoid |
| @@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, | |||
| 5580 | stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); | 5578 | stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); |
| 5581 | get_nfs4_file(fp); | 5579 | get_nfs4_file(fp); |
| 5582 | stp->st_stid.sc_file = fp; | 5580 | stp->st_stid.sc_file = fp; |
| 5583 | stp->st_stid.sc_free = nfs4_free_lock_stateid; | ||
| 5584 | stp->st_access_bmap = 0; | 5581 | stp->st_access_bmap = 0; |
| 5585 | stp->st_deny_bmap = open_stp->st_deny_bmap; | 5582 | stp->st_deny_bmap = open_stp->st_deny_bmap; |
| 5586 | stp->st_openstp = open_stp; | 5583 | stp->st_openstp = open_stp; |
| @@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, | |||
| 5623 | lst = find_lock_stateid(lo, fi); | 5620 | lst = find_lock_stateid(lo, fi); |
| 5624 | if (lst == NULL) { | 5621 | if (lst == NULL) { |
| 5625 | spin_unlock(&clp->cl_lock); | 5622 | spin_unlock(&clp->cl_lock); |
| 5626 | ns = nfs4_alloc_stid(clp, stateid_slab); | 5623 | ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); |
| 5627 | if (ns == NULL) | 5624 | if (ns == NULL) |
| 5628 | return NULL; | 5625 | return NULL; |
| 5629 | 5626 | ||
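
The nfs4state.c change passes the sc_free destructor into nfs4_alloc_stid() itself instead of having each caller assign stid->sc_free after allocation, closing the window in which a stateid exists without a release routine. A small sketch of the allocate-with-destructor pattern, using illustrative types (demo_stid and demo_alloc_stid are not the nfsd symbols):

    #include <linux/slab.h>

    struct demo_stid {
            void (*sc_free)(struct demo_stid *);    /* release routine, set at allocation time */
            /* ... id, client pointer, etc. ... */
    };

    static struct demo_stid *demo_alloc_stid(struct kmem_cache *slab,
                                             void (*sc_free)(struct demo_stid *))
    {
            struct demo_stid *stid = kmem_cache_zalloc(slab, GFP_KERNEL);

            if (!stid)
                    return NULL;
            stid->sc_free = sc_free;        /* never observable without a destructor */
            return stid;
    }

    static void demo_put_stid(struct demo_stid *stid)
    {
            stid->sc_free(stid);            /* always valid: assigned before publication */
    }
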
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 7ecf16be4a44..8fae53ce21d1 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
| @@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, | |||
| 2440 | p++; /* to be backfilled later */ | 2440 | p++; /* to be backfilled later */ |
| 2441 | 2441 | ||
| 2442 | if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { | 2442 | if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { |
| 2443 | u32 *supp = nfsd_suppattrs[minorversion]; | 2443 | u32 supp[3]; |
| 2444 | |||
| 2445 | memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp)); | ||
| 2444 | 2446 | ||
| 2445 | if (!IS_POSIXACL(dentry->d_inode)) | 2447 | if (!IS_POSIXACL(dentry->d_inode)) |
| 2446 | supp[0] &= ~FATTR4_WORD0_ACL; | 2448 | supp[0] &= ~FATTR4_WORD0_ACL; |
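
The nfs4xdr.c hunk fixes the SUPPORTED_ATTRS encoder so it clears bits in a local copy of nfsd_suppattrs[] rather than in the shared table itself, which would have disabled those attributes server-wide after the first non-ACL export was queried. A short sketch of the copy-before-modify pattern (the demo_* names and bitmask parameter are illustrative):

    #include <linux/string.h>
    #include <linux/types.h>

    static const u32 demo_suppattrs[3] = { 0xffffffff, 0xffffffff, 0xffffffff };

    static void demo_encode_supported(u32 out[3], u32 clear_word0)
    {
            u32 supp[3];

            memcpy(supp, demo_suppattrs, sizeof(supp));     /* private copy, shared table untouched */
            supp[0] &= ~clear_word0;                        /* e.g. drop the ACL bit for this export only */
            memcpy(out, supp, sizeof(supp));
    }
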
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index c9399366f9df..4516e8b7d776 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
| @@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, | |||
| 603 | __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, | 603 | __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, |
| 604 | stateid_t *stateid, unsigned char typemask, | 604 | stateid_t *stateid, unsigned char typemask, |
| 605 | struct nfs4_stid **s, struct nfsd_net *nn); | 605 | struct nfs4_stid **s, struct nfsd_net *nn); |
| 606 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | 606 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, |
| 607 | struct kmem_cache *slab); | 607 | void (*sc_free)(struct nfs4_stid *)); |
| 608 | void nfs4_unhash_stid(struct nfs4_stid *s); | 608 | void nfs4_unhash_stid(struct nfs4_stid *s); |
| 609 | void nfs4_put_stid(struct nfs4_stid *s); | 609 | void nfs4_put_stid(struct nfs4_stid *s); |
| 610 | void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid); | 610 | void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid); |
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 12eeae62a2b1..e1872f36147f 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
| @@ -1068,7 +1068,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 1068 | sb->s_time_gran = 1; | 1068 | sb->s_time_gran = 1; |
| 1069 | sb->s_max_links = NILFS_LINK_MAX; | 1069 | sb->s_max_links = NILFS_LINK_MAX; |
| 1070 | 1070 | ||
| 1071 | sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; | 1071 | sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info; |
| 1072 | 1072 | ||
| 1073 | err = load_nilfs(nilfs, sb); | 1073 | err = load_nilfs(nilfs, sb); |
| 1074 | if (err) | 1074 | if (err) |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index bbc175d4213d..a4c46221755e 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
| @@ -31,7 +31,6 @@ static bool should_merge(struct fsnotify_event *old_fsn, | |||
| 31 | static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) | 31 | static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) |
| 32 | { | 32 | { |
| 33 | struct fsnotify_event *test_event; | 33 | struct fsnotify_event *test_event; |
| 34 | bool do_merge = false; | ||
| 35 | 34 | ||
| 36 | pr_debug("%s: list=%p event=%p\n", __func__, list, event); | 35 | pr_debug("%s: list=%p event=%p\n", __func__, list, event); |
| 37 | 36 | ||
| @@ -47,16 +46,12 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event) | |||
| 47 | 46 | ||
| 48 | list_for_each_entry_reverse(test_event, list, list) { | 47 | list_for_each_entry_reverse(test_event, list, list) { |
| 49 | if (should_merge(test_event, event)) { | 48 | if (should_merge(test_event, event)) { |
| 50 | do_merge = true; | 49 | test_event->mask |= event->mask; |
| 51 | break; | 50 | return 1; |
| 52 | } | 51 | } |
| 53 | } | 52 | } |
| 54 | 53 | ||
| 55 | if (!do_merge) | 54 | return 0; |
| 56 | return 0; | ||
| 57 | |||
| 58 | test_event->mask |= event->mask; | ||
| 59 | return 1; | ||
| 60 | } | 55 | } |
| 61 | 56 | ||
| 62 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 57 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index d3fea0bd89e2..6043306e8e21 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
| @@ -510,18 +510,6 @@ void fsnotify_detach_group_marks(struct fsnotify_group *group) | |||
| 510 | } | 510 | } |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old) | ||
| 514 | { | ||
| 515 | assert_spin_locked(&old->lock); | ||
| 516 | new->inode = old->inode; | ||
| 517 | new->mnt = old->mnt; | ||
| 518 | if (old->group) | ||
| 519 | fsnotify_get_group(old->group); | ||
| 520 | new->group = old->group; | ||
| 521 | new->mask = old->mask; | ||
| 522 | new->free_mark = old->free_mark; | ||
| 523 | } | ||
| 524 | |||
| 525 | /* | 513 | /* |
| 526 | * Nothing fancy, just initialize lists and locks and counters. | 514 | * Nothing fancy, just initialize lists and locks and counters. |
| 527 | */ | 515 | */ |
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 27d1242c8383..564c504d6efd 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c | |||
| @@ -349,7 +349,7 @@ static void sc_show_sock_container(struct seq_file *seq, | |||
| 349 | " func key: 0x%08x\n" | 349 | " func key: 0x%08x\n" |
| 350 | " func type: %u\n", | 350 | " func type: %u\n", |
| 351 | sc, | 351 | sc, |
| 352 | atomic_read(&sc->sc_kref.refcount), | 352 | kref_read(&sc->sc_kref), |
| 353 | &saddr, inet ? ntohs(sport) : 0, | 353 | &saddr, inet ? ntohs(sport) : 0, |
| 354 | &daddr, inet ? ntohs(dport) : 0, | 354 | &daddr, inet ? ntohs(dport) : 0, |
| 355 | sc->sc_node->nd_name, | 355 | sc->sc_node->nd_name, |
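
From here on, the ocfs2 hunks are a mechanical conversion of atomic_read(&kref.refcount) to kref_read(), the accessor added in preparation for moving struct kref onto refcount_t. A one-line sketch of the accessor in a debug print (demo_obj is illustrative):

    #include <linux/kref.h>
    #include <linux/printk.h>

    struct demo_obj {
            struct kref ref;
    };

    static void demo_dump(struct demo_obj *obj)
    {
            /* kref_read() replaces peeking at kref.refcount directly. */
            pr_info("demo_obj %p refs %u\n", obj, kref_read(&obj->ref));
    }
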
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index d4b5c81f0445..ec000575e863 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
| @@ -97,7 +97,7 @@ | |||
| 97 | typeof(sc) __sc = (sc); \ | 97 | typeof(sc) __sc = (sc); \ |
| 98 | mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \ | 98 | mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \ |
| 99 | "pg_off %zu] " fmt, __sc, \ | 99 | "pg_off %zu] " fmt, __sc, \ |
| 100 | atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \ | 100 | kref_read(&__sc->sc_kref), __sc->sc_sock, \ |
| 101 | __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \ | 101 | __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \ |
| 102 | ##args); \ | 102 | ##args); \ |
| 103 | } while (0) | 103 | } while (0) |
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index e7b760deefae..9b984cae4c4e 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
| @@ -81,7 +81,7 @@ static void __dlm_print_lock(struct dlm_lock *lock) | |||
| 81 | lock->ml.type, lock->ml.convert_type, lock->ml.node, | 81 | lock->ml.type, lock->ml.convert_type, lock->ml.node, |
| 82 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | 82 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), |
| 83 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | 83 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), |
| 84 | atomic_read(&lock->lock_refs.refcount), | 84 | kref_read(&lock->lock_refs), |
| 85 | (list_empty(&lock->ast_list) ? 'y' : 'n'), | 85 | (list_empty(&lock->ast_list) ? 'y' : 'n'), |
| 86 | (lock->ast_pending ? 'y' : 'n'), | 86 | (lock->ast_pending ? 'y' : 'n'), |
| 87 | (list_empty(&lock->bast_list) ? 'y' : 'n'), | 87 | (list_empty(&lock->bast_list) ? 'y' : 'n'), |
| @@ -106,7 +106,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) | |||
| 106 | printk("lockres: %s, owner=%u, state=%u\n", | 106 | printk("lockres: %s, owner=%u, state=%u\n", |
| 107 | buf, res->owner, res->state); | 107 | buf, res->owner, res->state); |
| 108 | printk(" last used: %lu, refcnt: %u, on purge list: %s\n", | 108 | printk(" last used: %lu, refcnt: %u, on purge list: %s\n", |
| 109 | res->last_used, atomic_read(&res->refs.refcount), | 109 | res->last_used, kref_read(&res->refs), |
| 110 | list_empty(&res->purge) ? "no" : "yes"); | 110 | list_empty(&res->purge) ? "no" : "yes"); |
| 111 | printk(" on dirty list: %s, on reco list: %s, " | 111 | printk(" on dirty list: %s, on reco list: %s, " |
| 112 | "migrating pending: %s\n", | 112 | "migrating pending: %s\n", |
| @@ -298,7 +298,7 @@ static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len) | |||
| 298 | mle_type, mle->master, mle->new_master, | 298 | mle_type, mle->master, mle->new_master, |
| 299 | !list_empty(&mle->hb_events), | 299 | !list_empty(&mle->hb_events), |
| 300 | !!mle->inuse, | 300 | !!mle->inuse, |
| 301 | atomic_read(&mle->mle_refs.refcount)); | 301 | kref_read(&mle->mle_refs)); |
| 302 | 302 | ||
| 303 | out += snprintf(buf + out, len - out, "Maybe="); | 303 | out += snprintf(buf + out, len - out, "Maybe="); |
| 304 | out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES, | 304 | out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES, |
| @@ -494,7 +494,7 @@ static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len) | |||
| 494 | lock->ast_pending, lock->bast_pending, | 494 | lock->ast_pending, lock->bast_pending, |
| 495 | lock->convert_pending, lock->lock_pending, | 495 | lock->convert_pending, lock->lock_pending, |
| 496 | lock->cancel_pending, lock->unlock_pending, | 496 | lock->cancel_pending, lock->unlock_pending, |
| 497 | atomic_read(&lock->lock_refs.refcount)); | 497 | kref_read(&lock->lock_refs)); |
| 498 | spin_unlock(&lock->spinlock); | 498 | spin_unlock(&lock->spinlock); |
| 499 | 499 | ||
| 500 | return out; | 500 | return out; |
| @@ -521,7 +521,7 @@ static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len) | |||
| 521 | !list_empty(&res->recovering), | 521 | !list_empty(&res->recovering), |
| 522 | res->inflight_locks, res->migration_pending, | 522 | res->inflight_locks, res->migration_pending, |
| 523 | atomic_read(&res->asts_reserved), | 523 | atomic_read(&res->asts_reserved), |
| 524 | atomic_read(&res->refs.refcount)); | 524 | kref_read(&res->refs)); |
| 525 | 525 | ||
| 526 | /* refmap */ | 526 | /* refmap */ |
| 527 | out += snprintf(buf + out, len - out, "RMAP:"); | 527 | out += snprintf(buf + out, len - out, "RMAP:"); |
| @@ -777,7 +777,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) | |||
| 777 | /* Purge Count: xxx Refs: xxx */ | 777 | /* Purge Count: xxx Refs: xxx */ |
| 778 | out += snprintf(buf + out, len - out, | 778 | out += snprintf(buf + out, len - out, |
| 779 | "Purge Count: %d Refs: %d\n", dlm->purge_count, | 779 | "Purge Count: %d Refs: %d\n", dlm->purge_count, |
| 780 | atomic_read(&dlm->dlm_refs.refcount)); | 780 | kref_read(&dlm->dlm_refs)); |
| 781 | 781 | ||
| 782 | /* Dead Node: xxx */ | 782 | /* Dead Node: xxx */ |
| 783 | out += snprintf(buf + out, len - out, | 783 | out += snprintf(buf + out, len - out, |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 733e4e79c8e2..32fd261ae13d 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
| @@ -2072,7 +2072,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, | |||
| 2072 | INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); | 2072 | INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); |
| 2073 | 2073 | ||
| 2074 | mlog(0, "context init: refcount %u\n", | 2074 | mlog(0, "context init: refcount %u\n", |
| 2075 | atomic_read(&dlm->dlm_refs.refcount)); | 2075 | kref_read(&dlm->dlm_refs)); |
| 2076 | 2076 | ||
| 2077 | leave: | 2077 | leave: |
| 2078 | if (ret < 0 && dlm) { | 2078 | if (ret < 0 && dlm) { |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index a464c8088170..7025d8c27999 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
| @@ -233,7 +233,7 @@ static void __dlm_put_mle(struct dlm_master_list_entry *mle) | |||
| 233 | 233 | ||
| 234 | assert_spin_locked(&dlm->spinlock); | 234 | assert_spin_locked(&dlm->spinlock); |
| 235 | assert_spin_locked(&dlm->master_lock); | 235 | assert_spin_locked(&dlm->master_lock); |
| 236 | if (!atomic_read(&mle->mle_refs.refcount)) { | 236 | if (!kref_read(&mle->mle_refs)) { |
| 237 | /* this may or may not crash, but who cares. | 237 | /* this may or may not crash, but who cares. |
| 238 | * it's a BUG. */ | 238 | * it's a BUG. */ |
| 239 | mlog(ML_ERROR, "bad mle: %p\n", mle); | 239 | mlog(ML_ERROR, "bad mle: %p\n", mle); |
| @@ -1124,9 +1124,9 @@ recheck: | |||
| 1124 | unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); | 1124 | unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); |
| 1125 | 1125 | ||
| 1126 | /* | 1126 | /* |
| 1127 | if (atomic_read(&mle->mle_refs.refcount) < 2) | 1127 | if (kref_read(&mle->mle_refs) < 2) |
| 1128 | mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, | 1128 | mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, |
| 1129 | atomic_read(&mle->mle_refs.refcount), | 1129 | kref_read(&mle->mle_refs), |
| 1130 | res->lockname.len, res->lockname.name); | 1130 | res->lockname.len, res->lockname.name); |
| 1131 | */ | 1131 | */ |
| 1132 | atomic_set(&mle->woken, 0); | 1132 | atomic_set(&mle->woken, 0); |
| @@ -1979,7 +1979,7 @@ ok: | |||
| 1979 | * on this mle. */ | 1979 | * on this mle. */ |
| 1980 | spin_lock(&dlm->master_lock); | 1980 | spin_lock(&dlm->master_lock); |
| 1981 | 1981 | ||
| 1982 | rr = atomic_read(&mle->mle_refs.refcount); | 1982 | rr = kref_read(&mle->mle_refs); |
| 1983 | if (mle->inuse > 0) { | 1983 | if (mle->inuse > 0) { |
| 1984 | if (extra_ref && rr < 3) | 1984 | if (extra_ref && rr < 3) |
| 1985 | err = 1; | 1985 | err = 1; |
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 1082b2c3014b..63d701cd1e2e 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c | |||
| @@ -251,7 +251,7 @@ leave: | |||
| 251 | mlog(0, "lock %u:%llu should be gone now! refs=%d\n", | 251 | mlog(0, "lock %u:%llu should be gone now! refs=%d\n", |
| 252 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | 252 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), |
| 253 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | 253 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), |
| 254 | atomic_read(&lock->lock_refs.refcount)-1); | 254 | kref_read(&lock->lock_refs)-1); |
| 255 | dlm_lock_put(lock); | 255 | dlm_lock_put(lock); |
| 256 | } | 256 | } |
| 257 | if (actions & DLM_UNLOCK_CALL_AST) | 257 | if (actions & DLM_UNLOCK_CALL_AST) |
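The hunks above replace direct reads of the kref's embedded atomic (atomic_read(&x->refs.refcount)) with kref_read(), which hides the counter's representation so kref can change its internals without breaking callers. A minimal sketch, assuming only the standard kref API, of how a debug dump looks after the conversion; struct dlm_lock_demo and its helpers are hypothetical stand-ins, not code from the patch:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct dlm_lock_demo {
		struct kref refs;		/* embedded reference count */
		/* ... other lock state ... */
	};

	static void dlm_lock_demo_release(struct kref *kref)
	{
		struct dlm_lock_demo *lock = container_of(kref, struct dlm_lock_demo, refs);

		kfree(lock);
	}

	static void dlm_lock_demo_dump(struct dlm_lock_demo *lock)
	{
		/* kref_read() only reports the current count; it takes no reference. */
		pr_debug("lock %p refs=%u\n", lock, kref_read(&lock->refs));
	}

	static void dlm_lock_demo_put(struct dlm_lock_demo *lock)
	{
		kref_put(&lock->refs, dlm_lock_demo_release);
	}

As in the debugfs dumps above, the read is purely informational: a nonzero result does not pin the object, so it is only meaningful where a reference is already held.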
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 83d576f6a287..77d1632e905d 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
| @@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, | |||
| 3303 | mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, | 3303 | mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, |
| 3304 | lockres->l_level, new_level); | 3304 | lockres->l_level, new_level); |
| 3305 | 3305 | ||
| 3306 | /* | ||
| 3307 | * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always | ||
| 3308 | * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that | ||
| 3309 | * we can recover correctly from node failure. Otherwise, we may get | ||
| 3310 | * an invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set. | ||
| 3311 | */ | ||
| 3312 | if (!ocfs2_is_o2cb_active() && | ||
| 3313 | lockres->l_ops->flags & LOCK_TYPE_USES_LVB) | ||
| 3314 | lvb = 1; | ||
| 3315 | |||
| 3306 | if (lvb) | 3316 | if (lvb) |
| 3307 | dlm_flags |= DLM_LKF_VALBLK; | 3317 | dlm_flags |= DLM_LKF_VALBLK; |
| 3308 | 3318 | ||
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 52c07346bea3..820359096c7a 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c | |||
| @@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; | |||
| 48 | */ | 48 | */ |
| 49 | static struct ocfs2_stack_plugin *active_stack; | 49 | static struct ocfs2_stack_plugin *active_stack; |
| 50 | 50 | ||
| 51 | inline int ocfs2_is_o2cb_active(void) | ||
| 52 | { | ||
| 53 | return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); | ||
| 54 | } | ||
| 55 | EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); | ||
| 56 | |||
| 51 | static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) | 57 | static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) |
| 52 | { | 58 | { |
| 53 | struct ocfs2_stack_plugin *p; | 59 | struct ocfs2_stack_plugin *p; |
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index f2dce10fae54..e3036e1790e8 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h | |||
| @@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p | |||
| 298 | int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); | 298 | int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); |
| 299 | void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); | 299 | void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); |
| 300 | 300 | ||
| 301 | /* In ocfs2_downconvert_lock(), we need to know which stack we are using */ | ||
| 302 | int ocfs2_is_o2cb_active(void); | ||
| 303 | |||
| 301 | extern struct kset *ocfs2_kset; | 304 | extern struct kset *ocfs2_kset; |
| 302 | 305 | ||
| 303 | #endif /* STACKGLUE_H */ | 306 | #endif /* STACKGLUE_H */ |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 9ad48d9202a9..023bb0b03352 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
| @@ -154,29 +154,38 @@ out_err: | |||
| 154 | static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, | 154 | static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, |
| 155 | struct dentry **ret) | 155 | struct dentry **ret) |
| 156 | { | 156 | { |
| 157 | const char *s = d->name.name; | 157 | /* Counting down from the end, since the prefix can change */ |
| 158 | size_t rem = d->name.len - 1; | ||
| 158 | struct dentry *dentry = NULL; | 159 | struct dentry *dentry = NULL; |
| 159 | int err; | 160 | int err; |
| 160 | 161 | ||
| 161 | if (*s != '/') | 162 | if (d->name.name[0] != '/') |
| 162 | return ovl_lookup_single(base, d, d->name.name, d->name.len, | 163 | return ovl_lookup_single(base, d, d->name.name, d->name.len, |
| 163 | 0, "", ret); | 164 | 0, "", ret); |
| 164 | 165 | ||
| 165 | while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) { | 166 | while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) { |
| 167 | const char *s = d->name.name + d->name.len - rem; | ||
| 166 | const char *next = strchrnul(s, '/'); | 168 | const char *next = strchrnul(s, '/'); |
| 167 | size_t slen = strlen(s); | 169 | size_t thislen = next - s; |
| 170 | bool end = !next[0]; | ||
| 168 | 171 | ||
| 169 | if (WARN_ON(slen > d->name.len) || | 172 | /* Verify we did not go off the rails */ |
| 170 | WARN_ON(strcmp(d->name.name + d->name.len - slen, s))) | 173 | if (WARN_ON(s[-1] != '/')) |
| 171 | return -EIO; | 174 | return -EIO; |
| 172 | 175 | ||
| 173 | err = ovl_lookup_single(base, d, s, next - s, | 176 | err = ovl_lookup_single(base, d, s, thislen, |
| 174 | d->name.len - slen, next, &base); | 177 | d->name.len - rem, next, &base); |
| 175 | dput(dentry); | 178 | dput(dentry); |
| 176 | if (err) | 179 | if (err) |
| 177 | return err; | 180 | return err; |
| 178 | dentry = base; | 181 | dentry = base; |
| 179 | s = next; | 182 | if (end) |
| 183 | break; | ||
| 184 | |||
| 185 | rem -= thislen + 1; | ||
| 186 | |||
| 187 | if (WARN_ON(rem >= d->name.len)) | ||
| 188 | return -EIO; | ||
| 180 | } | 189 | } |
| 181 | *ret = dentry; | 190 | *ret = dentry; |
| 182 | return 0; | 191 | return 0; |
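The rewritten ovl_lookup_layer() loop tracks the current component by how many bytes remain to the end of the name ("rem") instead of keeping a pointer into the buffer, because, as the new comment says, the prefix of the name can change while the walk is in progress. A standalone sketch of just that counting scheme in plain userspace C; walk_components() is a hypothetical demo helper, not the overlayfs code:

	#define _GNU_SOURCE			/* for strchrnul() */
	#include <stdio.h>
	#include <string.h>

	/*
	 * Walk a "/a/b/c" style name one component at a time, identifying each
	 * component by its distance from the end of the string, as the patched
	 * loop does.
	 */
	static void walk_components(const char *name)
	{
		size_t len = strlen(name);
		size_t rem = len - 1;			/* bytes after the leading '/' */

		while (1) {
			const char *s = name + len - rem;	/* start of this component */
			const char *next = strchrnul(s, '/');
			size_t thislen = next - s;

			printf("component \"%.*s\" at offset %zu\n",
			       (int)thislen, s, len - rem);

			if (!next[0])			/* last component reached */
				break;
			rem -= thislen + 1;		/* skip the component and its '/' */
		}
	}

	int main(void)
	{
		walk_components("/upper/redirect/dir");
		return 0;
	}

Because the position is recomputed from "rem" on every pass, the walk stays consistent even if the string it indexes into is swapped for one with a different prefix but the same tail, which is the situation the WARN_ON(rem >= d->name.len) in the hunk is guarding against.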
diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 595522022aca..c9d48dc78495 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c | |||
| @@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
| 922 | int error; | 922 | int error; |
| 923 | 923 | ||
| 924 | if (type == ACL_TYPE_ACCESS) { | 924 | if (type == ACL_TYPE_ACCESS) { |
| 925 | error = posix_acl_equiv_mode(acl, &inode->i_mode); | 925 | error = posix_acl_update_mode(inode, |
| 926 | if (error < 0) | 926 | &inode->i_mode, &acl); |
| 927 | return 0; | 927 | if (error) |
| 928 | if (error == 0) | 928 | return error; |
| 929 | acl = NULL; | ||
| 930 | } | 929 | } |
| 931 | 930 | ||
| 932 | inode->i_ctime = current_time(inode); | 931 | inode->i_ctime = current_time(inode); |
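The simple_set_acl() hunk replaces the open-coded posix_acl_equiv_mode() handling with posix_acl_update_mode(), which updates the inode mode from the ACL and clears the ACL pointer when the ACL carries nothing beyond the mode bits. For context, a hedged sketch of a whole ->set_acl handler written that way; demo_set_acl is illustrative and ignores any filesystem-specific persistence beyond the cached ACL:

	#include <linux/fs.h>
	#include <linux/posix_acl.h>

	static int demo_set_acl(struct inode *inode, struct posix_acl *acl, int type)
	{
		int error;

		if (type == ACL_TYPE_ACCESS) {
			/*
			 * Recomputes inode->i_mode from the ACL and, when the ACL
			 * is equivalent to the mode bits alone, NULLs out acl so
			 * nothing extra needs to be stored.
			 */
			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
			if (error)
				return error;
		}

		inode->i_ctime = current_time(inode);
		set_cached_acl(inode, type, acl);
		return 0;
	}

Unlike the old sequence, an error from the mode update is now propagated instead of being silently treated as success, which is the behavioural part of the fix.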
diff --git a/fs/proc/array.c b/fs/proc/array.c index 51a4213afa2e..fe12b519d09b 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
| @@ -401,8 +401,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
| 401 | unsigned long long start_time; | 401 | unsigned long long start_time; |
| 402 | unsigned long cmin_flt = 0, cmaj_flt = 0; | 402 | unsigned long cmin_flt = 0, cmaj_flt = 0; |
| 403 | unsigned long min_flt = 0, maj_flt = 0; | 403 | unsigned long min_flt = 0, maj_flt = 0; |
| 404 | cputime_t cutime, cstime, utime, stime; | 404 | u64 cutime, cstime, utime, stime; |
| 405 | cputime_t cgtime, gtime; | 405 | u64 cgtime, gtime; |
| 406 | unsigned long rsslim = 0; | 406 | unsigned long rsslim = 0; |
| 407 | char tcomm[sizeof(task->comm)]; | 407 | char tcomm[sizeof(task->comm)]; |
| 408 | unsigned long flags; | 408 | unsigned long flags; |
| @@ -497,10 +497,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
| 497 | seq_put_decimal_ull(m, " ", cmin_flt); | 497 | seq_put_decimal_ull(m, " ", cmin_flt); |
| 498 | seq_put_decimal_ull(m, " ", maj_flt); | 498 | seq_put_decimal_ull(m, " ", maj_flt); |
| 499 | seq_put_decimal_ull(m, " ", cmaj_flt); | 499 | seq_put_decimal_ull(m, " ", cmaj_flt); |
| 500 | seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime)); | 500 | seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime)); |
| 501 | seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime)); | 501 | seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime)); |
| 502 | seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime)); | 502 | seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime)); |
| 503 | seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime)); | 503 | seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime)); |
| 504 | seq_put_decimal_ll(m, " ", priority); | 504 | seq_put_decimal_ll(m, " ", priority); |
| 505 | seq_put_decimal_ll(m, " ", nice); | 505 | seq_put_decimal_ll(m, " ", nice); |
| 506 | seq_put_decimal_ll(m, " ", num_threads); | 506 | seq_put_decimal_ll(m, " ", num_threads); |
| @@ -542,8 +542,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
| 542 | seq_put_decimal_ull(m, " ", task->rt_priority); | 542 | seq_put_decimal_ull(m, " ", task->rt_priority); |
| 543 | seq_put_decimal_ull(m, " ", task->policy); | 543 | seq_put_decimal_ull(m, " ", task->policy); |
| 544 | seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task)); | 544 | seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task)); |
| 545 | seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime)); | 545 | seq_put_decimal_ull(m, " ", nsec_to_clock_t(gtime)); |
| 546 | seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime)); | 546 | seq_put_decimal_ll(m, " ", nsec_to_clock_t(cgtime)); |
| 547 | 547 | ||
| 548 | if (mm && permitted) { | 548 | if (mm && permitted) { |
| 549 | seq_put_decimal_ull(m, " ", mm->start_data); | 549 | seq_put_decimal_ull(m, " ", mm->start_data); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 8e7e61b28f31..3d773eb9e144 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -2179,7 +2179,7 @@ static const struct file_operations proc_map_files_operations = { | |||
| 2179 | .llseek = generic_file_llseek, | 2179 | .llseek = generic_file_llseek, |
| 2180 | }; | 2180 | }; |
| 2181 | 2181 | ||
| 2182 | #ifdef CONFIG_CHECKPOINT_RESTORE | 2182 | #if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS) |
| 2183 | struct timers_private { | 2183 | struct timers_private { |
| 2184 | struct pid *pid; | 2184 | struct pid *pid; |
| 2185 | struct task_struct *task; | 2185 | struct task_struct *task; |
| @@ -2488,6 +2488,12 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, | |||
| 2488 | length = -ESRCH; | 2488 | length = -ESRCH; |
| 2489 | if (!task) | 2489 | if (!task) |
| 2490 | goto out_no_task; | 2490 | goto out_no_task; |
| 2491 | |||
| 2492 | /* A task may only write its own attributes. */ | ||
| 2493 | length = -EACCES; | ||
| 2494 | if (current != task) | ||
| 2495 | goto out; | ||
| 2496 | |||
| 2491 | if (count > PAGE_SIZE) | 2497 | if (count > PAGE_SIZE) |
| 2492 | count = PAGE_SIZE; | 2498 | count = PAGE_SIZE; |
| 2493 | 2499 | ||
| @@ -2503,14 +2509,13 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, | |||
| 2503 | } | 2509 | } |
| 2504 | 2510 | ||
| 2505 | /* Guard against adverse ptrace interaction */ | 2511 | /* Guard against adverse ptrace interaction */ |
| 2506 | length = mutex_lock_interruptible(&task->signal->cred_guard_mutex); | 2512 | length = mutex_lock_interruptible(¤t->signal->cred_guard_mutex); |
| 2507 | if (length < 0) | 2513 | if (length < 0) |
| 2508 | goto out_free; | 2514 | goto out_free; |
| 2509 | 2515 | ||
| 2510 | length = security_setprocattr(task, | 2516 | length = security_setprocattr(file->f_path.dentry->d_name.name, |
| 2511 | (char*)file->f_path.dentry->d_name.name, | ||
| 2512 | page, count); | 2517 | page, count); |
| 2513 | mutex_unlock(&task->signal->cred_guard_mutex); | 2518 | mutex_unlock(¤t->signal->cred_guard_mutex); |
| 2514 | out_free: | 2519 | out_free: |
| 2515 | kfree(page); | 2520 | kfree(page); |
| 2516 | out: | 2521 | out: |
| @@ -2936,7 +2941,7 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
| 2936 | REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), | 2941 | REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), |
| 2937 | REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), | 2942 | REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), |
| 2938 | #endif | 2943 | #endif |
| 2939 | #ifdef CONFIG_CHECKPOINT_RESTORE | 2944 | #if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS) |
| 2940 | REG("timers", S_IRUGO, proc_timers_operations), | 2945 | REG("timers", S_IRUGO, proc_timers_operations), |
| 2941 | #endif | 2946 | #endif |
| 2942 | REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), | 2947 | REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), |
| @@ -3179,6 +3184,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) | |||
| 3179 | iter.tgid += 1, iter = next_tgid(ns, iter)) { | 3184 | iter.tgid += 1, iter = next_tgid(ns, iter)) { |
| 3180 | char name[PROC_NUMBUF]; | 3185 | char name[PROC_NUMBUF]; |
| 3181 | int len; | 3186 | int len; |
| 3187 | |||
| 3188 | cond_resched(); | ||
| 3182 | if (!has_pid_permissions(ns, iter.task, 2)) | 3189 | if (!has_pid_permissions(ns, iter.task, 2)) |
| 3183 | continue; | 3190 | continue; |
| 3184 | 3191 | ||
diff --git a/fs/proc/page.c b/fs/proc/page.c index a2066e6dee90..2726536489b1 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
| @@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page) | |||
| 173 | u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); | 173 | u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); |
| 174 | u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); | 174 | u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); |
| 175 | 175 | ||
| 176 | u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache); | 176 | if (PageSwapCache(page)) |
| 177 | u |= 1 << KPF_SWAPCACHE; | ||
| 177 | u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); | 178 | u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); |
| 178 | 179 | ||
| 179 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); | 180 | u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); |
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 55313d994895..d4e37acd4821 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
| @@ -709,7 +709,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx) | |||
| 709 | ctl_dir = container_of(head, struct ctl_dir, header); | 709 | ctl_dir = container_of(head, struct ctl_dir, header); |
| 710 | 710 | ||
| 711 | if (!dir_emit_dots(file, ctx)) | 711 | if (!dir_emit_dots(file, ctx)) |
| 712 | return 0; | 712 | goto out; |
| 713 | 713 | ||
| 714 | pos = 2; | 714 | pos = 2; |
| 715 | 715 | ||
| @@ -719,6 +719,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx) | |||
| 719 | break; | 719 | break; |
| 720 | } | 720 | } |
| 721 | } | 721 | } |
| 722 | out: | ||
| 722 | sysctl_head_finish(head); | 723 | sysctl_head_finish(head); |
| 723 | return 0; | 724 | return 0; |
| 724 | } | 725 | } |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index d700c42b3572..e47c3e8c4dfe 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
| @@ -21,9 +21,9 @@ | |||
| 21 | 21 | ||
| 22 | #ifdef arch_idle_time | 22 | #ifdef arch_idle_time |
| 23 | 23 | ||
| 24 | static cputime64_t get_idle_time(int cpu) | 24 | static u64 get_idle_time(int cpu) |
| 25 | { | 25 | { |
| 26 | cputime64_t idle; | 26 | u64 idle; |
| 27 | 27 | ||
| 28 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; | 28 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; |
| 29 | if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) | 29 | if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) |
| @@ -31,9 +31,9 @@ static cputime64_t get_idle_time(int cpu) | |||
| 31 | return idle; | 31 | return idle; |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static cputime64_t get_iowait_time(int cpu) | 34 | static u64 get_iowait_time(int cpu) |
| 35 | { | 35 | { |
| 36 | cputime64_t iowait; | 36 | u64 iowait; |
| 37 | 37 | ||
| 38 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; | 38 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
| 39 | if (cpu_online(cpu) && nr_iowait_cpu(cpu)) | 39 | if (cpu_online(cpu) && nr_iowait_cpu(cpu)) |
| @@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int cpu) | |||
| 45 | 45 | ||
| 46 | static u64 get_idle_time(int cpu) | 46 | static u64 get_idle_time(int cpu) |
| 47 | { | 47 | { |
| 48 | u64 idle, idle_time = -1ULL; | 48 | u64 idle, idle_usecs = -1ULL; |
| 49 | 49 | ||
| 50 | if (cpu_online(cpu)) | 50 | if (cpu_online(cpu)) |
| 51 | idle_time = get_cpu_idle_time_us(cpu, NULL); | 51 | idle_usecs = get_cpu_idle_time_us(cpu, NULL); |
| 52 | 52 | ||
| 53 | if (idle_time == -1ULL) | 53 | if (idle_usecs == -1ULL) |
| 54 | /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ | 54 | /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ |
| 55 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; | 55 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; |
| 56 | else | 56 | else |
| 57 | idle = usecs_to_cputime64(idle_time); | 57 | idle = idle_usecs * NSEC_PER_USEC; |
| 58 | 58 | ||
| 59 | return idle; | 59 | return idle; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static u64 get_iowait_time(int cpu) | 62 | static u64 get_iowait_time(int cpu) |
| 63 | { | 63 | { |
| 64 | u64 iowait, iowait_time = -1ULL; | 64 | u64 iowait, iowait_usecs = -1ULL; |
| 65 | 65 | ||
| 66 | if (cpu_online(cpu)) | 66 | if (cpu_online(cpu)) |
| 67 | iowait_time = get_cpu_iowait_time_us(cpu, NULL); | 67 | iowait_usecs = get_cpu_iowait_time_us(cpu, NULL); |
| 68 | 68 | ||
| 69 | if (iowait_time == -1ULL) | 69 | if (iowait_usecs == -1ULL) |
| 70 | /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ | 70 | /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ |
| 71 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; | 71 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
| 72 | else | 72 | else |
| 73 | iowait = usecs_to_cputime64(iowait_time); | 73 | iowait = iowait_usecs * NSEC_PER_USEC; |
| 74 | 74 | ||
| 75 | return iowait; | 75 | return iowait; |
| 76 | } | 76 | } |
| @@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p, void *v) | |||
| 115 | } | 115 | } |
| 116 | sum += arch_irq_stat(); | 116 | sum += arch_irq_stat(); |
| 117 | 117 | ||
| 118 | seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user)); | 118 | seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user)); |
| 119 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); | 119 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); |
| 120 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); | 120 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); |
| 121 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); | 121 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); |
| 122 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); | 122 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); |
| 123 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); | 123 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); |
| 124 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); | 124 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); |
| 125 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); | 125 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); |
| 126 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); | 126 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); |
| 127 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); | 127 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); |
| 128 | seq_putc(p, '\n'); | 128 | seq_putc(p, '\n'); |
| 129 | 129 | ||
| 130 | for_each_online_cpu(i) { | 130 | for_each_online_cpu(i) { |
| @@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p, void *v) | |||
| 140 | guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; | 140 | guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; |
| 141 | guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; | 141 | guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; |
| 142 | seq_printf(p, "cpu%d", i); | 142 | seq_printf(p, "cpu%d", i); |
| 143 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user)); | 143 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); |
| 144 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); | 144 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); |
| 145 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); | 145 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); |
| 146 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); | 146 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); |
| 147 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); | 147 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); |
| 148 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); | 148 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); |
| 149 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); | 149 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); |
| 150 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); | 150 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); |
| 151 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); | 151 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); |
| 152 | seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); | 152 | seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); |
| 153 | seq_putc(p, '\n'); | 153 | seq_putc(p, '\n'); |
| 154 | } | 154 | } |
| 155 | seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); | 155 | seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); |
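The cputime64_to_clock_t() calls become nsec_to_clock_t() because kcpustat now accumulates raw nanoseconds (see the idle_usecs * NSEC_PER_USEC conversion above). Roughly, and assuming the common case where USER_HZ = 100 divides NSEC_PER_SEC evenly, that conversion is a single division; a small standalone sketch of the arithmetic, where nsec_to_clock_t_demo is a stand-in rather than the kernel helper:

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define USER_HZ		100ULL		/* clock ticks per second exposed to userspace */

	/* Stand-in for the kernel's nsec_to_clock_t() when USER_HZ divides
	 * NSEC_PER_SEC evenly: one tick is NSEC_PER_SEC / USER_HZ = 10 ms. */
	static uint64_t nsec_to_clock_t_demo(uint64_t nsec)
	{
		return nsec / (NSEC_PER_SEC / USER_HZ);
	}

	int main(void)
	{
		uint64_t idle_nsec = 12345678900ULL;	/* ~12.35 s of accumulated idle time */

		/* Prints 1234, i.e. 12.34 s worth of 10 ms ticks. */
		printf("idle: %llu ticks\n",
		       (unsigned long long)nsec_to_clock_t_demo(idle_nsec));
		return 0;
	}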
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 33de567c25af..7981c4ffe787 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c | |||
| @@ -5,23 +5,20 @@ | |||
| 5 | #include <linux/seq_file.h> | 5 | #include <linux/seq_file.h> |
| 6 | #include <linux/time.h> | 6 | #include <linux/time.h> |
| 7 | #include <linux/kernel_stat.h> | 7 | #include <linux/kernel_stat.h> |
| 8 | #include <linux/cputime.h> | ||
| 9 | 8 | ||
| 10 | static int uptime_proc_show(struct seq_file *m, void *v) | 9 | static int uptime_proc_show(struct seq_file *m, void *v) |
| 11 | { | 10 | { |
| 12 | struct timespec uptime; | 11 | struct timespec uptime; |
| 13 | struct timespec idle; | 12 | struct timespec idle; |
| 14 | u64 idletime; | ||
| 15 | u64 nsec; | 13 | u64 nsec; |
| 16 | u32 rem; | 14 | u32 rem; |
| 17 | int i; | 15 | int i; |
| 18 | 16 | ||
| 19 | idletime = 0; | 17 | nsec = 0; |
| 20 | for_each_possible_cpu(i) | 18 | for_each_possible_cpu(i) |
| 21 | idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; | 19 | nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; |
| 22 | 20 | ||
| 23 | get_monotonic_boottime(&uptime); | 21 | get_monotonic_boottime(&uptime); |
| 24 | nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC; | ||
| 25 | idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); | 22 | idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); |
| 26 | idle.tv_nsec = rem; | 23 | idle.tv_nsec = rem; |
| 27 | seq_printf(m, "%lu.%02lu %lu.%02lu\n", | 24 | seq_printf(m, "%lu.%02lu %lu.%02lu\n", |
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 27c059e1760a..11f918d34b1e 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c | |||
| @@ -133,7 +133,8 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max, | |||
| 133 | struct persistent_ram_zone *prz; | 133 | struct persistent_ram_zone *prz; |
| 134 | int i = (*c)++; | 134 | int i = (*c)++; |
| 135 | 135 | ||
| 136 | if (i >= max) | 136 | /* Give up if we never existed or have hit the end. */ |
| 137 | if (!przs || i >= max) | ||
| 137 | return NULL; | 138 | return NULL; |
| 138 | 139 | ||
| 139 | prz = przs[i]; | 140 | prz = przs[i]; |
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index a857338b7dab..bc927e30bdcc 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c | |||
| @@ -467,8 +467,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, | |||
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, | 469 | static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, |
| 470 | struct persistent_ram_ecc_info *ecc_info, | 470 | struct persistent_ram_ecc_info *ecc_info) |
| 471 | unsigned long flags) | ||
| 472 | { | 471 | { |
| 473 | int ret; | 472 | int ret; |
| 474 | 473 | ||
| @@ -494,10 +493,9 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, | |||
| 494 | prz->buffer->sig); | 493 | prz->buffer->sig); |
| 495 | } | 494 | } |
| 496 | 495 | ||
| 496 | /* Rewind missing or invalid memory area. */ | ||
| 497 | prz->buffer->sig = sig; | 497 | prz->buffer->sig = sig; |
| 498 | persistent_ram_zap(prz); | 498 | persistent_ram_zap(prz); |
| 499 | prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock); | ||
| 500 | prz->flags = flags; | ||
| 501 | 499 | ||
| 502 | return 0; | 500 | return 0; |
| 503 | } | 501 | } |
| @@ -533,11 +531,15 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, | |||
| 533 | goto err; | 531 | goto err; |
| 534 | } | 532 | } |
| 535 | 533 | ||
| 534 | /* Initialize general buffer state. */ | ||
| 535 | prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock); | ||
| 536 | prz->flags = flags; | ||
| 537 | |||
| 536 | ret = persistent_ram_buffer_map(start, size, prz, memtype); | 538 | ret = persistent_ram_buffer_map(start, size, prz, memtype); |
| 537 | if (ret) | 539 | if (ret) |
| 538 | goto err; | 540 | goto err; |
| 539 | 541 | ||
| 540 | ret = persistent_ram_post_init(prz, sig, ecc_info, flags); | 542 | ret = persistent_ram_post_init(prz, sig, ecc_info); |
| 541 | if (ret) | 543 | if (ret) |
| 542 | goto err; | 544 | goto err; |
| 543 | 545 | ||
diff --git a/fs/romfs/super.c b/fs/romfs/super.c index d0f8a38dfafa..0186fe6d39f3 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c | |||
| @@ -74,6 +74,7 @@ | |||
| 74 | #include <linux/highmem.h> | 74 | #include <linux/highmem.h> |
| 75 | #include <linux/pagemap.h> | 75 | #include <linux/pagemap.h> |
| 76 | #include <linux/uaccess.h> | 76 | #include <linux/uaccess.h> |
| 77 | #include <linux/major.h> | ||
| 77 | #include "internal.h" | 78 | #include "internal.h" |
| 78 | 79 | ||
| 79 | static struct kmem_cache *romfs_inode_cachep; | 80 | static struct kmem_cache *romfs_inode_cachep; |
| @@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode) | |||
| 416 | static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) | 417 | static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
| 417 | { | 418 | { |
| 418 | struct super_block *sb = dentry->d_sb; | 419 | struct super_block *sb = dentry->d_sb; |
| 419 | u64 id = huge_encode_dev(sb->s_bdev->bd_dev); | 420 | u64 id = 0; |
| 421 | |||
| 422 | /* When calling huge_encode_dev(), | ||
| 423 | * use sb->s_bdev->bd_dev when: | ||
| 424 | * - CONFIG_ROMFS_ON_BLOCK defined | ||
| 425 | * use sb->s_dev when: | ||
| 426 | * - CONFIG_ROMFS_ON_BLOCK undefined and | ||
| 427 | * - CONFIG_ROMFS_ON_MTD defined | ||
| 428 | * leave id as 0 when: | ||
| 429 | * - CONFIG_ROMFS_ON_BLOCK undefined and | ||
| 430 | * - CONFIG_ROMFS_ON_MTD undefined | ||
| 431 | */ | ||
| 432 | if (sb->s_bdev) | ||
| 433 | id = huge_encode_dev(sb->s_bdev->bd_dev); | ||
| 434 | else if (sb->s_dev) | ||
| 435 | id = huge_encode_dev(sb->s_dev); | ||
| 420 | 436 | ||
| 421 | buf->f_type = ROMFS_MAGIC; | 437 | buf->f_type = ROMFS_MAGIC; |
| 422 | buf->f_namelen = ROMFS_MAXFN; | 438 | buf->f_namelen = ROMFS_MAXFN; |
| @@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 489 | sb->s_flags |= MS_RDONLY | MS_NOATIME; | 505 | sb->s_flags |= MS_RDONLY | MS_NOATIME; |
| 490 | sb->s_op = &romfs_super_ops; | 506 | sb->s_op = &romfs_super_ops; |
| 491 | 507 | ||
| 508 | #ifdef CONFIG_ROMFS_ON_MTD | ||
| 509 | /* Use same dev ID from the underlying mtdblock device */ | ||
| 510 | if (sb->s_mtd) | ||
| 511 | sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); | ||
| 512 | #endif | ||
| 492 | /* read the image superblock and check it */ | 513 | /* read the image superblock and check it */ |
| 493 | rsb = kmalloc(512, GFP_KERNEL); | 514 | rsb = kmalloc(512, GFP_KERNEL); |
| 494 | if (!rsb) | 515 | if (!rsb) |
diff --git a/fs/splice.c b/fs/splice.c index 873d83104e79..4ef78aa8ef61 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -204,6 +204,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, | |||
| 204 | buf->len = spd->partial[page_nr].len; | 204 | buf->len = spd->partial[page_nr].len; |
| 205 | buf->private = spd->partial[page_nr].private; | 205 | buf->private = spd->partial[page_nr].private; |
| 206 | buf->ops = spd->ops; | 206 | buf->ops = spd->ops; |
| 207 | buf->flags = 0; | ||
| 207 | 208 | ||
| 208 | pipe->nrbufs++; | 209 | pipe->nrbufs++; |
| 209 | page_nr++; | 210 | page_nr++; |
diff --git a/fs/super.c b/fs/super.c index 1709ed029a2c..ea662b0e5e78 100644 --- a/fs/super.c +++ b/fs/super.c | |||
| @@ -1047,7 +1047,7 @@ static int set_bdev_super(struct super_block *s, void *data) | |||
| 1047 | * We set the bdi here to the queue backing, file systems can | 1047 | * We set the bdi here to the queue backing, file systems can |
| 1048 | * overwrite this in ->fill_super() | 1048 | * overwrite this in ->fill_super() |
| 1049 | */ | 1049 | */ |
| 1050 | s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; | 1050 | s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info; |
| 1051 | return 0; | 1051 | return 0; |
| 1052 | } | 1052 | } |
| 1053 | 1053 | ||
diff --git a/fs/timerfd.c b/fs/timerfd.c index c173cc196175..384fa759a563 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
| @@ -40,6 +40,7 @@ struct timerfd_ctx { | |||
| 40 | short unsigned settime_flags; /* to show in fdinfo */ | 40 | short unsigned settime_flags; /* to show in fdinfo */ |
| 41 | struct rcu_head rcu; | 41 | struct rcu_head rcu; |
| 42 | struct list_head clist; | 42 | struct list_head clist; |
| 43 | spinlock_t cancel_lock; | ||
| 43 | bool might_cancel; | 44 | bool might_cancel; |
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| @@ -112,7 +113,7 @@ void timerfd_clock_was_set(void) | |||
| 112 | rcu_read_unlock(); | 113 | rcu_read_unlock(); |
| 113 | } | 114 | } |
| 114 | 115 | ||
| 115 | static void timerfd_remove_cancel(struct timerfd_ctx *ctx) | 116 | static void __timerfd_remove_cancel(struct timerfd_ctx *ctx) |
| 116 | { | 117 | { |
| 117 | if (ctx->might_cancel) { | 118 | if (ctx->might_cancel) { |
| 118 | ctx->might_cancel = false; | 119 | ctx->might_cancel = false; |
| @@ -122,6 +123,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx) | |||
| 122 | } | 123 | } |
| 123 | } | 124 | } |
| 124 | 125 | ||
| 126 | static void timerfd_remove_cancel(struct timerfd_ctx *ctx) | ||
| 127 | { | ||
| 128 | spin_lock(&ctx->cancel_lock); | ||
| 129 | __timerfd_remove_cancel(ctx); | ||
| 130 | spin_unlock(&ctx->cancel_lock); | ||
| 131 | } | ||
| 132 | |||
| 125 | static bool timerfd_canceled(struct timerfd_ctx *ctx) | 133 | static bool timerfd_canceled(struct timerfd_ctx *ctx) |
| 126 | { | 134 | { |
| 127 | if (!ctx->might_cancel || ctx->moffs != KTIME_MAX) | 135 | if (!ctx->might_cancel || ctx->moffs != KTIME_MAX) |
| @@ -132,6 +140,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx) | |||
| 132 | 140 | ||
| 133 | static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) | 141 | static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) |
| 134 | { | 142 | { |
| 143 | spin_lock(&ctx->cancel_lock); | ||
| 135 | if ((ctx->clockid == CLOCK_REALTIME || | 144 | if ((ctx->clockid == CLOCK_REALTIME || |
| 136 | ctx->clockid == CLOCK_REALTIME_ALARM) && | 145 | ctx->clockid == CLOCK_REALTIME_ALARM) && |
| 137 | (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) { | 146 | (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) { |
| @@ -141,9 +150,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) | |||
| 141 | list_add_rcu(&ctx->clist, &cancel_list); | 150 | list_add_rcu(&ctx->clist, &cancel_list); |
| 142 | spin_unlock(&cancel_lock); | 151 | spin_unlock(&cancel_lock); |
| 143 | } | 152 | } |
| 144 | } else if (ctx->might_cancel) { | 153 | } else { |
| 145 | timerfd_remove_cancel(ctx); | 154 | __timerfd_remove_cancel(ctx); |
| 146 | } | 155 | } |
| 156 | spin_unlock(&ctx->cancel_lock); | ||
| 147 | } | 157 | } |
| 148 | 158 | ||
| 149 | static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) | 159 | static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) |
| @@ -400,6 +410,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
| 400 | return -ENOMEM; | 410 | return -ENOMEM; |
| 401 | 411 | ||
| 402 | init_waitqueue_head(&ctx->wqh); | 412 | init_waitqueue_head(&ctx->wqh); |
| 413 | spin_lock_init(&ctx->cancel_lock); | ||
| 403 | ctx->clockid = clockid; | 414 | ctx->clockid = clockid; |
| 404 | 415 | ||
| 405 | if (isalarm(ctx)) | 416 | if (isalarm(ctx)) |
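The timerfd hunks add a per-context cancel_lock and split the removal helper into __timerfd_remove_cancel(), which assumes the lock is held, and a timerfd_remove_cancel() wrapper that takes it, so timerfd_setup_cancel() can update might_cancel and the cancel list under one lock without deadlocking on its own helper. A minimal sketch of that locking convention in isolation; demo_ctx and its fields are illustrative, not the timerfd structures:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_ctx {
		spinlock_t cancel_lock;		/* protects might_cancel */
		bool might_cancel;
	};

	/* Caller must hold ctx->cancel_lock. */
	static void __demo_remove_cancel(struct demo_ctx *ctx)
	{
		if (ctx->might_cancel) {
			ctx->might_cancel = false;
			/* ...unlink from the global cancel list here... */
		}
	}

	/* Locked wrapper for callers that hold nothing yet. */
	static void demo_remove_cancel(struct demo_ctx *ctx)
	{
		spin_lock(&ctx->cancel_lock);
		__demo_remove_cancel(ctx);
		spin_unlock(&ctx->cancel_lock);
	}

	/* The setup path serializes the whole decision under the same lock and
	 * calls the bare __ variant, never the wrapper, while holding it. */
	static void demo_setup_cancel(struct demo_ctx *ctx, bool want_cancel)
	{
		spin_lock(&ctx->cancel_lock);
		if (want_cancel)
			ctx->might_cancel = true;
		else
			__demo_remove_cancel(ctx);
		spin_unlock(&ctx->cancel_lock);
	}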
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index 0a908ae7af13..b0d0623c83ed 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig | |||
| @@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT | |||
| 53 | 53 | ||
| 54 | config UBIFS_FS_ENCRYPTION | 54 | config UBIFS_FS_ENCRYPTION |
| 55 | bool "UBIFS Encryption" | 55 | bool "UBIFS Encryption" |
| 56 | depends on UBIFS_FS | 56 | depends on UBIFS_FS && BLOCK |
| 57 | select FS_ENCRYPTION | 57 | select FS_ENCRYPTION |
| 58 | default n | 58 | default n |
| 59 | help | 59 | help |
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c index 3402720f2b28..382ed428cfd2 100644 --- a/fs/ubifs/crypto.c +++ b/fs/ubifs/crypto.c | |||
| @@ -26,15 +26,6 @@ static unsigned int ubifs_crypt_max_namelen(struct inode *inode) | |||
| 26 | return UBIFS_MAX_NLEN; | 26 | return UBIFS_MAX_NLEN; |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | static int ubifs_key_prefix(struct inode *inode, u8 **key) | ||
| 30 | { | ||
| 31 | static char prefix[] = "ubifs:"; | ||
| 32 | |||
| 33 | *key = prefix; | ||
| 34 | |||
| 35 | return sizeof(prefix) - 1; | ||
| 36 | } | ||
| 37 | |||
| 38 | int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn, | 29 | int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn, |
| 39 | unsigned int in_len, unsigned int *out_len, int block) | 30 | unsigned int in_len, unsigned int *out_len, int block) |
| 40 | { | 31 | { |
| @@ -86,12 +77,12 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn, | |||
| 86 | return 0; | 77 | return 0; |
| 87 | } | 78 | } |
| 88 | 79 | ||
| 89 | struct fscrypt_operations ubifs_crypt_operations = { | 80 | const struct fscrypt_operations ubifs_crypt_operations = { |
| 90 | .flags = FS_CFLG_OWN_PAGES, | 81 | .flags = FS_CFLG_OWN_PAGES, |
| 82 | .key_prefix = "ubifs:", | ||
| 91 | .get_context = ubifs_crypt_get_context, | 83 | .get_context = ubifs_crypt_get_context, |
| 92 | .set_context = ubifs_crypt_set_context, | 84 | .set_context = ubifs_crypt_set_context, |
| 93 | .is_encrypted = __ubifs_crypt_is_encrypted, | 85 | .is_encrypted = __ubifs_crypt_is_encrypted, |
| 94 | .empty_dir = ubifs_crypt_empty_dir, | 86 | .empty_dir = ubifs_crypt_empty_dir, |
| 95 | .max_namelen = ubifs_crypt_max_namelen, | 87 | .max_namelen = ubifs_crypt_max_namelen, |
| 96 | .key_prefix = ubifs_key_prefix, | ||
| 97 | }; | 88 | }; |
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 1c5331ac9614..528369f3e472 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
| @@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry, | |||
| 390 | dbg_gen("dent '%pd', mode %#hx in dir ino %lu", | 390 | dbg_gen("dent '%pd', mode %#hx in dir ino %lu", |
| 391 | dentry, mode, dir->i_ino); | 391 | dentry, mode, dir->i_ino); |
| 392 | 392 | ||
| 393 | if (ubifs_crypt_is_encrypted(dir)) { | ||
| 394 | err = fscrypt_get_encryption_info(dir); | ||
| 395 | if (err) | ||
| 396 | return err; | ||
| 397 | |||
| 398 | if (!fscrypt_has_encryption_key(dir)) { | ||
| 399 | return -EPERM; | ||
| 400 | } | ||
| 401 | } | ||
| 402 | |||
| 403 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); | 393 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); |
| 404 | if (err) | 394 | if (err) |
| 405 | return err; | 395 | return err; |
| @@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, | |||
| 741 | ubifs_assert(inode_is_locked(dir)); | 731 | ubifs_assert(inode_is_locked(dir)); |
| 742 | ubifs_assert(inode_is_locked(inode)); | 732 | ubifs_assert(inode_is_locked(inode)); |
| 743 | 733 | ||
| 744 | if (ubifs_crypt_is_encrypted(dir)) { | 734 | if (ubifs_crypt_is_encrypted(dir) && |
| 745 | if (!fscrypt_has_permitted_context(dir, inode)) | 735 | !fscrypt_has_permitted_context(dir, inode)) |
| 746 | return -EPERM; | 736 | return -EPERM; |
| 747 | |||
| 748 | err = fscrypt_get_encryption_info(inode); | ||
| 749 | if (err) | ||
| 750 | return err; | ||
| 751 | |||
| 752 | if (!fscrypt_has_encryption_key(inode)) | ||
| 753 | return -EPERM; | ||
| 754 | } | ||
| 755 | 737 | ||
| 756 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); | 738 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); |
| 757 | if (err) | 739 | if (err) |
| @@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 1000 | if (err) | 982 | if (err) |
| 1001 | return err; | 983 | return err; |
| 1002 | 984 | ||
| 1003 | if (ubifs_crypt_is_encrypted(dir)) { | ||
| 1004 | err = fscrypt_get_encryption_info(dir); | ||
| 1005 | if (err) | ||
| 1006 | goto out_budg; | ||
| 1007 | |||
| 1008 | if (!fscrypt_has_encryption_key(dir)) { | ||
| 1009 | err = -EPERM; | ||
| 1010 | goto out_budg; | ||
| 1011 | } | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); | 985 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); |
| 1015 | if (err) | 986 | if (err) |
| 1016 | goto out_budg; | 987 | goto out_budg; |
| @@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry, | |||
| 1096 | return err; | 1067 | return err; |
| 1097 | } | 1068 | } |
| 1098 | 1069 | ||
| 1099 | if (ubifs_crypt_is_encrypted(dir)) { | ||
| 1100 | err = fscrypt_get_encryption_info(dir); | ||
| 1101 | if (err) | ||
| 1102 | goto out_budg; | ||
| 1103 | |||
| 1104 | if (!fscrypt_has_encryption_key(dir)) { | ||
| 1105 | err = -EPERM; | ||
| 1106 | goto out_budg; | ||
| 1107 | } | ||
| 1108 | } | ||
| 1109 | |||
| 1110 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); | 1070 | err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); |
| 1111 | if (err) | 1071 | if (err) |
| 1112 | goto out_budg; | 1072 | goto out_budg; |
| @@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, | |||
| 1231 | goto out_inode; | 1191 | goto out_inode; |
| 1232 | } | 1192 | } |
| 1233 | 1193 | ||
| 1234 | err = fscrypt_get_encryption_info(inode); | ||
| 1235 | if (err) { | ||
| 1236 | kfree(sd); | ||
| 1237 | goto out_inode; | ||
| 1238 | } | ||
| 1239 | |||
| 1240 | if (!fscrypt_has_encryption_key(inode)) { | ||
| 1241 | kfree(sd); | ||
| 1242 | err = -EPERM; | ||
| 1243 | goto out_inode; | ||
| 1244 | } | ||
| 1245 | |||
| 1246 | ostr.name = sd->encrypted_path; | 1194 | ostr.name = sd->encrypted_path; |
| 1247 | ostr.len = disk_link.len; | 1195 | ostr.len = disk_link.len; |
| 1248 | 1196 | ||
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 78d713644df3..da519ba205f6 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c | |||
| @@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 217 | case FS_IOC32_SETFLAGS: | 217 | case FS_IOC32_SETFLAGS: |
| 218 | cmd = FS_IOC_SETFLAGS; | 218 | cmd = FS_IOC_SETFLAGS; |
| 219 | break; | 219 | break; |
| 220 | case FS_IOC_SET_ENCRYPTION_POLICY: | ||
| 221 | case FS_IOC_GET_ENCRYPTION_POLICY: | ||
| 222 | break; | ||
| 220 | default: | 223 | default: |
| 221 | return -ENOIOCTLCMD; | 224 | return -ENOIOCTLCMD; |
| 222 | } | 225 | } |
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index a459211a1c21..294519b98874 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c | |||
| @@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, | |||
| 744 | 744 | ||
| 745 | } else { | 745 | } else { |
| 746 | data->compr_size = 0; | 746 | data->compr_size = 0; |
| 747 | out_len = compr_len; | ||
| 747 | } | 748 | } |
| 748 | 749 | ||
| 749 | dlen = UBIFS_DATA_NODE_SZ + out_len; | 750 | dlen = UBIFS_DATA_NODE_SZ + out_len; |
| @@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in | |||
| 1319 | dn->compr_type = cpu_to_le16(compr_type); | 1320 | dn->compr_type = cpu_to_le16(compr_type); |
| 1320 | dn->size = cpu_to_le32(*new_len); | 1321 | dn->size = cpu_to_le32(*new_len); |
| 1321 | *new_len = UBIFS_DATA_NODE_SZ + out_len; | 1322 | *new_len = UBIFS_DATA_NODE_SZ + out_len; |
| 1323 | err = 0; | ||
| 1322 | out: | 1324 | out: |
| 1323 | kfree(buf); | 1325 | kfree(buf); |
| 1324 | return err; | 1326 | return err; |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index e08aa04fc835..b73811bd7676 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
| @@ -2000,7 +2000,7 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi) | |||
| 2000 | } | 2000 | } |
| 2001 | 2001 | ||
| 2002 | #ifndef CONFIG_UBIFS_FS_ENCRYPTION | 2002 | #ifndef CONFIG_UBIFS_FS_ENCRYPTION |
| 2003 | struct fscrypt_operations ubifs_crypt_operations = { | 2003 | const struct fscrypt_operations ubifs_crypt_operations = { |
| 2004 | .is_encrypted = __ubifs_crypt_is_encrypted, | 2004 | .is_encrypted = __ubifs_crypt_is_encrypted, |
| 2005 | }; | 2005 | }; |
| 2006 | #endif | 2006 | #endif |
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 74ae2de949df..709aa098dd46 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
| @@ -34,6 +34,11 @@ | |||
| 34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 35 | #include "ubifs.h" | 35 | #include "ubifs.h" |
| 36 | 36 | ||
| 37 | static int try_read_node(const struct ubifs_info *c, void *buf, int type, | ||
| 38 | int len, int lnum, int offs); | ||
| 39 | static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, | ||
| 40 | struct ubifs_zbranch *zbr, void *node); | ||
| 41 | |||
| 37 | /* | 42 | /* |
| 38 | * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. | 43 | * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. |
| 39 | * @NAME_LESS: name corresponding to the first argument is less than second | 44 | * @NAME_LESS: name corresponding to the first argument is less than second |
| @@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, | |||
| 402 | return 0; | 407 | return 0; |
| 403 | } | 408 | } |
| 404 | 409 | ||
| 405 | err = ubifs_tnc_read_node(c, zbr, node); | 410 | if (c->replaying) { |
| 411 | err = fallible_read_node(c, &zbr->key, zbr, node); | ||
| 412 | /* | ||
| 413 | * When the node was not found, return -ENOENT, 0 otherwise. | ||
| 414 | * Negative return codes stay as-is. | ||
| 415 | */ | ||
| 416 | if (err == 0) | ||
| 417 | err = -ENOENT; | ||
| 418 | else if (err == 1) | ||
| 419 | err = 0; | ||
| 420 | } else { | ||
| 421 | err = ubifs_tnc_read_node(c, zbr, node); | ||
| 422 | } | ||
| 406 | if (err) | 423 | if (err) |
| 407 | return err; | 424 | return err; |
| 408 | 425 | ||
| @@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, | |||
| 2857 | if (fname_len(nm) > 0) { | 2874 | if (fname_len(nm) > 0) { |
| 2858 | if (err) { | 2875 | if (err) { |
| 2859 | /* Handle collisions */ | 2876 | /* Handle collisions */ |
| 2860 | err = resolve_collision(c, key, &znode, &n, nm); | 2877 | if (c->replaying) |
| 2878 | err = fallible_resolve_collision(c, key, &znode, &n, | ||
| 2879 | nm, 0); | ||
| 2880 | else | ||
| 2881 | err = resolve_collision(c, key, &znode, &n, nm); | ||
| 2861 | dbg_tnc("rc returned %d, znode %p, n %d", | 2882 | dbg_tnc("rc returned %d, znode %p, n %d", |
| 2862 | err, znode, n); | 2883 | err, znode, n); |
| 2863 | if (unlikely(err < 0)) | 2884 | if (unlikely(err < 0)) |
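During replay (c->replaying), tnc_read_hashed_node() now reads through fallible_read_node(), whose result is tri-state: negative on error, 0 when the node is not found, 1 when it was read. The hunk folds that back into the usual 0 / -ENOENT convention; a tiny sketch of just that mapping, with a hypothetical fallible_read() standing in for the UBIFS call:

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for fallible_read_node(): <0 error, 0 not found, 1 read OK. */
	static int fallible_read(int present)
	{
		return present ? 1 : 0;
	}

	/* Fold the tri-state result into the usual 0 / -ENOENT convention,
	 * letting real errors pass through unchanged. */
	static int read_during_replay(int present)
	{
		int err = fallible_read(present);

		if (err == 0)
			err = -ENOENT;		/* node genuinely missing */
		else if (err == 1)
			err = 0;		/* node read successfully */
		return err;
	}

	int main(void)
	{
		printf("present -> %d, missing -> %d\n",
		       read_during_replay(1), read_during_replay(0));	/* 0 and -ENOENT */
		return 0;
	}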
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index ca72382ce6cc..f0c86f076535 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
| @@ -38,7 +38,11 @@ | |||
| 38 | #include <linux/backing-dev.h> | 38 | #include <linux/backing-dev.h> |
| 39 | #include <linux/security.h> | 39 | #include <linux/security.h> |
| 40 | #include <linux/xattr.h> | 40 | #include <linux/xattr.h> |
| 41 | #include <linux/fscrypto.h> | 41 | #ifdef CONFIG_UBIFS_FS_ENCRYPTION |
| 42 | #include <linux/fscrypt_supp.h> | ||
| 43 | #else | ||
| 44 | #include <linux/fscrypt_notsupp.h> | ||
| 45 | #endif | ||
| 42 | #include <linux/random.h> | 46 | #include <linux/random.h> |
| 43 | #include "ubifs-media.h" | 47 | #include "ubifs-media.h" |
| 44 | 48 | ||
| @@ -1797,28 +1801,6 @@ int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len, | |||
| 1797 | #include "key.h" | 1801 | #include "key.h" |
| 1798 | 1802 | ||
| 1799 | #ifndef CONFIG_UBIFS_FS_ENCRYPTION | 1803 | #ifndef CONFIG_UBIFS_FS_ENCRYPTION |
| 1800 | #define fscrypt_set_d_op(i) | ||
| 1801 | #define fscrypt_get_ctx fscrypt_notsupp_get_ctx | ||
| 1802 | #define fscrypt_release_ctx fscrypt_notsupp_release_ctx | ||
| 1803 | #define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page | ||
| 1804 | #define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page | ||
| 1805 | #define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages | ||
| 1806 | #define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page | ||
| 1807 | #define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page | ||
| 1808 | #define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range | ||
| 1809 | #define fscrypt_ioctl_set_policy fscrypt_notsupp_ioctl_set_policy | ||
| 1810 | #define fscrypt_ioctl_get_policy fscrypt_notsupp_ioctl_get_policy | ||
| 1811 | #define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context | ||
| 1812 | #define fscrypt_inherit_context fscrypt_notsupp_inherit_context | ||
| 1813 | #define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info | ||
| 1814 | #define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info | ||
| 1815 | #define fscrypt_setup_filename fscrypt_notsupp_setup_filename | ||
| 1816 | #define fscrypt_free_filename fscrypt_notsupp_free_filename | ||
| 1817 | #define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size | ||
| 1818 | #define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer | ||
| 1819 | #define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer | ||
| 1820 | #define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr | ||
| 1821 | #define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk | ||
| 1822 | static inline int ubifs_encrypt(const struct inode *inode, | 1804 | static inline int ubifs_encrypt(const struct inode *inode, |
| 1823 | struct ubifs_data_node *dn, | 1805 | struct ubifs_data_node *dn, |
| 1824 | unsigned int in_len, unsigned int *out_len, | 1806 | unsigned int in_len, unsigned int *out_len, |
| @@ -1842,7 +1824,7 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn, | |||
| 1842 | unsigned int *out_len, int block); | 1824 | unsigned int *out_len, int block); |
| 1843 | #endif | 1825 | #endif |
| 1844 | 1826 | ||
| 1845 | extern struct fscrypt_operations ubifs_crypt_operations; | 1827 | extern const struct fscrypt_operations ubifs_crypt_operations; |
| 1846 | 1828 | ||
| 1847 | static inline bool __ubifs_crypt_is_encrypted(struct inode *inode) | 1829 | static inline bool __ubifs_crypt_is_encrypted(struct inode *inode) |
| 1848 | { | 1830 | { |
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h index 4792b771aa80..9f24bd1a9f44 100644 --- a/fs/udf/ecma_167.h +++ b/fs/udf/ecma_167.h | |||
| @@ -41,7 +41,7 @@ | |||
| 41 | struct charspec { | 41 | struct charspec { |
| 42 | uint8_t charSetType; | 42 | uint8_t charSetType; |
| 43 | uint8_t charSetInfo[63]; | 43 | uint8_t charSetInfo[63]; |
| 44 | } __attribute__ ((packed)); | 44 | } __packed; |
| 45 | 45 | ||
| 46 | /* Character Set Type (ECMA 167r3 1/7.2.1.1) */ | 46 | /* Character Set Type (ECMA 167r3 1/7.2.1.1) */ |
| 47 | #define CHARSPEC_TYPE_CS0 0x00 /* (1/7.2.2) */ | 47 | #define CHARSPEC_TYPE_CS0 0x00 /* (1/7.2.2) */ |
| @@ -68,7 +68,7 @@ struct timestamp { | |||
| 68 | uint8_t centiseconds; | 68 | uint8_t centiseconds; |
| 69 | uint8_t hundredsOfMicroseconds; | 69 | uint8_t hundredsOfMicroseconds; |
| 70 | uint8_t microseconds; | 70 | uint8_t microseconds; |
| 71 | } __attribute__ ((packed)); | 71 | } __packed; |
| 72 | 72 | ||
| 73 | /* Type and Time Zone (ECMA 167r3 1/7.3.1) */ | 73 | /* Type and Time Zone (ECMA 167r3 1/7.3.1) */ |
| 74 | #define TIMESTAMP_TYPE_MASK 0xF000 | 74 | #define TIMESTAMP_TYPE_MASK 0xF000 |
| @@ -82,7 +82,7 @@ struct regid { | |||
| 82 | uint8_t flags; | 82 | uint8_t flags; |
| 83 | uint8_t ident[23]; | 83 | uint8_t ident[23]; |
| 84 | uint8_t identSuffix[8]; | 84 | uint8_t identSuffix[8]; |
| 85 | } __attribute__ ((packed)); | 85 | } __packed; |
| 86 | 86 | ||
| 87 | /* Flags (ECMA 167r3 1/7.4.1) */ | 87 | /* Flags (ECMA 167r3 1/7.4.1) */ |
| 88 | #define ENTITYID_FLAGS_DIRTY 0x00 | 88 | #define ENTITYID_FLAGS_DIRTY 0x00 |
| @@ -95,7 +95,7 @@ struct volStructDesc { | |||
| 95 | uint8_t stdIdent[VSD_STD_ID_LEN]; | 95 | uint8_t stdIdent[VSD_STD_ID_LEN]; |
| 96 | uint8_t structVersion; | 96 | uint8_t structVersion; |
| 97 | uint8_t structData[2041]; | 97 | uint8_t structData[2041]; |
| 98 | } __attribute__ ((packed)); | 98 | } __packed; |
| 99 | 99 | ||
| 100 | /* Standard Identifier (EMCA 167r2 2/9.1.2) */ | 100 | /* Standard Identifier (EMCA 167r2 2/9.1.2) */ |
| 101 | #define VSD_STD_ID_NSR02 "NSR02" /* (3/9.1) */ | 101 | #define VSD_STD_ID_NSR02 "NSR02" /* (3/9.1) */ |
| @@ -114,7 +114,7 @@ struct beginningExtendedAreaDesc { | |||
| 114 | uint8_t stdIdent[VSD_STD_ID_LEN]; | 114 | uint8_t stdIdent[VSD_STD_ID_LEN]; |
| 115 | uint8_t structVersion; | 115 | uint8_t structVersion; |
| 116 | uint8_t structData[2041]; | 116 | uint8_t structData[2041]; |
| 117 | } __attribute__ ((packed)); | 117 | } __packed; |
| 118 | 118 | ||
| 119 | /* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */ | 119 | /* Terminating Extended Area Descriptor (ECMA 167r3 2/9.3) */ |
| 120 | struct terminatingExtendedAreaDesc { | 120 | struct terminatingExtendedAreaDesc { |
| @@ -122,7 +122,7 @@ struct terminatingExtendedAreaDesc { | |||
| 122 | uint8_t stdIdent[VSD_STD_ID_LEN]; | 122 | uint8_t stdIdent[VSD_STD_ID_LEN]; |
| 123 | uint8_t structVersion; | 123 | uint8_t structVersion; |
| 124 | uint8_t structData[2041]; | 124 | uint8_t structData[2041]; |
| 125 | } __attribute__ ((packed)); | 125 | } __packed; |
| 126 | 126 | ||
| 127 | /* Boot Descriptor (ECMA 167r3 2/9.4) */ | 127 | /* Boot Descriptor (ECMA 167r3 2/9.4) */ |
| 128 | struct bootDesc { | 128 | struct bootDesc { |
| @@ -140,7 +140,7 @@ struct bootDesc { | |||
| 140 | __le16 flags; | 140 | __le16 flags; |
| 141 | uint8_t reserved2[32]; | 141 | uint8_t reserved2[32]; |
| 142 | uint8_t bootUse[1906]; | 142 | uint8_t bootUse[1906]; |
| 143 | } __attribute__ ((packed)); | 143 | } __packed; |
| 144 | 144 | ||
| 145 | /* Flags (ECMA 167r3 2/9.4.12) */ | 145 | /* Flags (ECMA 167r3 2/9.4.12) */ |
| 146 | #define BOOT_FLAGS_ERASE 0x01 | 146 | #define BOOT_FLAGS_ERASE 0x01 |
| @@ -149,7 +149,7 @@ struct bootDesc { | |||
| 149 | struct extent_ad { | 149 | struct extent_ad { |
| 150 | __le32 extLength; | 150 | __le32 extLength; |
| 151 | __le32 extLocation; | 151 | __le32 extLocation; |
| 152 | } __attribute__ ((packed)); | 152 | } __packed; |
| 153 | 153 | ||
| 154 | struct kernel_extent_ad { | 154 | struct kernel_extent_ad { |
| 155 | uint32_t extLength; | 155 | uint32_t extLength; |
| @@ -166,7 +166,7 @@ struct tag { | |||
| 166 | __le16 descCRC; | 166 | __le16 descCRC; |
| 167 | __le16 descCRCLength; | 167 | __le16 descCRCLength; |
| 168 | __le32 tagLocation; | 168 | __le32 tagLocation; |
| 169 | } __attribute__ ((packed)); | 169 | } __packed; |
| 170 | 170 | ||
| 171 | /* Tag Identifier (ECMA 167r3 3/7.2.1) */ | 171 | /* Tag Identifier (ECMA 167r3 3/7.2.1) */ |
| 172 | #define TAG_IDENT_PVD 0x0001 | 172 | #define TAG_IDENT_PVD 0x0001 |
| @@ -186,7 +186,7 @@ struct NSRDesc { | |||
| 186 | uint8_t structVersion; | 186 | uint8_t structVersion; |
| 187 | uint8_t reserved; | 187 | uint8_t reserved; |
| 188 | uint8_t structData[2040]; | 188 | uint8_t structData[2040]; |
| 189 | } __attribute__ ((packed)); | 189 | } __packed; |
| 190 | 190 | ||
| 191 | /* Primary Volume Descriptor (ECMA 167r3 3/10.1) */ | 191 | /* Primary Volume Descriptor (ECMA 167r3 3/10.1) */ |
| 192 | struct primaryVolDesc { | 192 | struct primaryVolDesc { |
| @@ -212,7 +212,7 @@ struct primaryVolDesc { | |||
| 212 | __le32 predecessorVolDescSeqLocation; | 212 | __le32 predecessorVolDescSeqLocation; |
| 213 | __le16 flags; | 213 | __le16 flags; |
| 214 | uint8_t reserved[22]; | 214 | uint8_t reserved[22]; |
| 215 | } __attribute__ ((packed)); | 215 | } __packed; |
| 216 | 216 | ||
| 217 | /* Flags (ECMA 167r3 3/10.1.21) */ | 217 | /* Flags (ECMA 167r3 3/10.1.21) */ |
| 218 | #define PVD_FLAGS_VSID_COMMON 0x0001 | 218 | #define PVD_FLAGS_VSID_COMMON 0x0001 |
| @@ -223,7 +223,7 @@ struct anchorVolDescPtr { | |||
| 223 | struct extent_ad mainVolDescSeqExt; | 223 | struct extent_ad mainVolDescSeqExt; |
| 224 | struct extent_ad reserveVolDescSeqExt; | 224 | struct extent_ad reserveVolDescSeqExt; |
| 225 | uint8_t reserved[480]; | 225 | uint8_t reserved[480]; |
| 226 | } __attribute__ ((packed)); | 226 | } __packed; |
| 227 | 227 | ||
| 228 | /* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */ | 228 | /* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */ |
| 229 | struct volDescPtr { | 229 | struct volDescPtr { |
| @@ -231,7 +231,7 @@ struct volDescPtr { | |||
| 231 | __le32 volDescSeqNum; | 231 | __le32 volDescSeqNum; |
| 232 | struct extent_ad nextVolDescSeqExt; | 232 | struct extent_ad nextVolDescSeqExt; |
| 233 | uint8_t reserved[484]; | 233 | uint8_t reserved[484]; |
| 234 | } __attribute__ ((packed)); | 234 | } __packed; |
| 235 | 235 | ||
| 236 | /* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */ | 236 | /* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */ |
| 237 | struct impUseVolDesc { | 237 | struct impUseVolDesc { |
| @@ -239,7 +239,7 @@ struct impUseVolDesc { | |||
| 239 | __le32 volDescSeqNum; | 239 | __le32 volDescSeqNum; |
| 240 | struct regid impIdent; | 240 | struct regid impIdent; |
| 241 | uint8_t impUse[460]; | 241 | uint8_t impUse[460]; |
| 242 | } __attribute__ ((packed)); | 242 | } __packed; |
| 243 | 243 | ||
| 244 | /* Partition Descriptor (ECMA 167r3 3/10.5) */ | 244 | /* Partition Descriptor (ECMA 167r3 3/10.5) */ |
| 245 | struct partitionDesc { | 245 | struct partitionDesc { |
| @@ -255,7 +255,7 @@ struct partitionDesc { | |||
| 255 | struct regid impIdent; | 255 | struct regid impIdent; |
| 256 | uint8_t impUse[128]; | 256 | uint8_t impUse[128]; |
| 257 | uint8_t reserved[156]; | 257 | uint8_t reserved[156]; |
| 258 | } __attribute__ ((packed)); | 258 | } __packed; |
| 259 | 259 | ||
| 260 | /* Partition Flags (ECMA 167r3 3/10.5.3) */ | 260 | /* Partition Flags (ECMA 167r3 3/10.5.3) */ |
| 261 | #define PD_PARTITION_FLAGS_ALLOC 0x0001 | 261 | #define PD_PARTITION_FLAGS_ALLOC 0x0001 |
| @@ -291,14 +291,14 @@ struct logicalVolDesc { | |||
| 291 | uint8_t impUse[128]; | 291 | uint8_t impUse[128]; |
| 292 | struct extent_ad integritySeqExt; | 292 | struct extent_ad integritySeqExt; |
| 293 | uint8_t partitionMaps[0]; | 293 | uint8_t partitionMaps[0]; |
| 294 | } __attribute__ ((packed)); | 294 | } __packed; |
| 295 | 295 | ||
| 296 | /* Generic Partition Map (ECMA 167r3 3/10.7.1) */ | 296 | /* Generic Partition Map (ECMA 167r3 3/10.7.1) */ |
| 297 | struct genericPartitionMap { | 297 | struct genericPartitionMap { |
| 298 | uint8_t partitionMapType; | 298 | uint8_t partitionMapType; |
| 299 | uint8_t partitionMapLength; | 299 | uint8_t partitionMapLength; |
| 300 | uint8_t partitionMapping[0]; | 300 | uint8_t partitionMapping[0]; |
| 301 | } __attribute__ ((packed)); | 301 | } __packed; |
| 302 | 302 | ||
| 303 | /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */ | 303 | /* Partition Map Type (ECMA 167r3 3/10.7.1.1) */ |
| 304 | #define GP_PARTITION_MAP_TYPE_UNDEF 0x00 | 304 | #define GP_PARTITION_MAP_TYPE_UNDEF 0x00 |
| @@ -311,14 +311,14 @@ struct genericPartitionMap1 { | |||
| 311 | uint8_t partitionMapLength; | 311 | uint8_t partitionMapLength; |
| 312 | __le16 volSeqNum; | 312 | __le16 volSeqNum; |
| 313 | __le16 partitionNum; | 313 | __le16 partitionNum; |
| 314 | } __attribute__ ((packed)); | 314 | } __packed; |
| 315 | 315 | ||
| 316 | /* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */ | 316 | /* Type 2 Partition Map (ECMA 167r3 3/10.7.3) */ |
| 317 | struct genericPartitionMap2 { | 317 | struct genericPartitionMap2 { |
| 318 | uint8_t partitionMapType; | 318 | uint8_t partitionMapType; |
| 319 | uint8_t partitionMapLength; | 319 | uint8_t partitionMapLength; |
| 320 | uint8_t partitionIdent[62]; | 320 | uint8_t partitionIdent[62]; |
| 321 | } __attribute__ ((packed)); | 321 | } __packed; |
| 322 | 322 | ||
| 323 | /* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */ | 323 | /* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */ |
| 324 | struct unallocSpaceDesc { | 324 | struct unallocSpaceDesc { |
| @@ -326,13 +326,13 @@ struct unallocSpaceDesc { | |||
| 326 | __le32 volDescSeqNum; | 326 | __le32 volDescSeqNum; |
| 327 | __le32 numAllocDescs; | 327 | __le32 numAllocDescs; |
| 328 | struct extent_ad allocDescs[0]; | 328 | struct extent_ad allocDescs[0]; |
| 329 | } __attribute__ ((packed)); | 329 | } __packed; |
| 330 | 330 | ||
| 331 | /* Terminating Descriptor (ECMA 167r3 3/10.9) */ | 331 | /* Terminating Descriptor (ECMA 167r3 3/10.9) */ |
| 332 | struct terminatingDesc { | 332 | struct terminatingDesc { |
| 333 | struct tag descTag; | 333 | struct tag descTag; |
| 334 | uint8_t reserved[496]; | 334 | uint8_t reserved[496]; |
| 335 | } __attribute__ ((packed)); | 335 | } __packed; |
| 336 | 336 | ||
| 337 | /* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */ | 337 | /* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */ |
| 338 | struct logicalVolIntegrityDesc { | 338 | struct logicalVolIntegrityDesc { |
| @@ -346,7 +346,7 @@ struct logicalVolIntegrityDesc { | |||
| 346 | __le32 freeSpaceTable[0]; | 346 | __le32 freeSpaceTable[0]; |
| 347 | __le32 sizeTable[0]; | 347 | __le32 sizeTable[0]; |
| 348 | uint8_t impUse[0]; | 348 | uint8_t impUse[0]; |
| 349 | } __attribute__ ((packed)); | 349 | } __packed; |
| 350 | 350 | ||
| 351 | /* Integrity Type (ECMA 167r3 3/10.10.3) */ | 351 | /* Integrity Type (ECMA 167r3 3/10.10.3) */ |
| 352 | #define LVID_INTEGRITY_TYPE_OPEN 0x00000000 | 352 | #define LVID_INTEGRITY_TYPE_OPEN 0x00000000 |
| @@ -356,7 +356,7 @@ struct logicalVolIntegrityDesc { | |||
| 356 | struct lb_addr { | 356 | struct lb_addr { |
| 357 | __le32 logicalBlockNum; | 357 | __le32 logicalBlockNum; |
| 358 | __le16 partitionReferenceNum; | 358 | __le16 partitionReferenceNum; |
| 359 | } __attribute__ ((packed)); | 359 | } __packed; |
| 360 | 360 | ||
| 361 | /* ... and its in-core analog */ | 361 | /* ... and its in-core analog */ |
| 362 | struct kernel_lb_addr { | 362 | struct kernel_lb_addr { |
| @@ -368,14 +368,14 @@ struct kernel_lb_addr { | |||
| 368 | struct short_ad { | 368 | struct short_ad { |
| 369 | __le32 extLength; | 369 | __le32 extLength; |
| 370 | __le32 extPosition; | 370 | __le32 extPosition; |
| 371 | } __attribute__ ((packed)); | 371 | } __packed; |
| 372 | 372 | ||
| 373 | /* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */ | 373 | /* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */ |
| 374 | struct long_ad { | 374 | struct long_ad { |
| 375 | __le32 extLength; | 375 | __le32 extLength; |
| 376 | struct lb_addr extLocation; | 376 | struct lb_addr extLocation; |
| 377 | uint8_t impUse[6]; | 377 | uint8_t impUse[6]; |
| 378 | } __attribute__ ((packed)); | 378 | } __packed; |
| 379 | 379 | ||
| 380 | struct kernel_long_ad { | 380 | struct kernel_long_ad { |
| 381 | uint32_t extLength; | 381 | uint32_t extLength; |
| @@ -389,7 +389,7 @@ struct ext_ad { | |||
| 389 | __le32 recordedLength; | 389 | __le32 recordedLength; |
| 390 | __le32 informationLength; | 390 | __le32 informationLength; |
| 391 | struct lb_addr extLocation; | 391 | struct lb_addr extLocation; |
| 392 | } __attribute__ ((packed)); | 392 | } __packed; |
| 393 | 393 | ||
| 394 | struct kernel_ext_ad { | 394 | struct kernel_ext_ad { |
| 395 | uint32_t extLength; | 395 | uint32_t extLength; |
| @@ -434,7 +434,7 @@ struct fileSetDesc { | |||
| 434 | struct long_ad nextExt; | 434 | struct long_ad nextExt; |
| 435 | struct long_ad streamDirectoryICB; | 435 | struct long_ad streamDirectoryICB; |
| 436 | uint8_t reserved[32]; | 436 | uint8_t reserved[32]; |
| 437 | } __attribute__ ((packed)); | 437 | } __packed; |
| 438 | 438 | ||
| 439 | /* Partition Header Descriptor (ECMA 167r3 4/14.3) */ | 439 | /* Partition Header Descriptor (ECMA 167r3 4/14.3) */ |
| 440 | struct partitionHeaderDesc { | 440 | struct partitionHeaderDesc { |
| @@ -444,7 +444,7 @@ struct partitionHeaderDesc { | |||
| 444 | struct short_ad freedSpaceTable; | 444 | struct short_ad freedSpaceTable; |
| 445 | struct short_ad freedSpaceBitmap; | 445 | struct short_ad freedSpaceBitmap; |
| 446 | uint8_t reserved[88]; | 446 | uint8_t reserved[88]; |
| 447 | } __attribute__ ((packed)); | 447 | } __packed; |
| 448 | 448 | ||
| 449 | /* File Identifier Descriptor (ECMA 167r3 4/14.4) */ | 449 | /* File Identifier Descriptor (ECMA 167r3 4/14.4) */ |
| 450 | struct fileIdentDesc { | 450 | struct fileIdentDesc { |
| @@ -457,7 +457,7 @@ struct fileIdentDesc { | |||
| 457 | uint8_t impUse[0]; | 457 | uint8_t impUse[0]; |
| 458 | uint8_t fileIdent[0]; | 458 | uint8_t fileIdent[0]; |
| 459 | uint8_t padding[0]; | 459 | uint8_t padding[0]; |
| 460 | } __attribute__ ((packed)); | 460 | } __packed; |
| 461 | 461 | ||
| 462 | /* File Characteristics (ECMA 167r3 4/14.4.3) */ | 462 | /* File Characteristics (ECMA 167r3 4/14.4.3) */ |
| 463 | #define FID_FILE_CHAR_HIDDEN 0x01 | 463 | #define FID_FILE_CHAR_HIDDEN 0x01 |
| @@ -471,7 +471,7 @@ struct allocExtDesc { | |||
| 471 | struct tag descTag; | 471 | struct tag descTag; |
| 472 | __le32 previousAllocExtLocation; | 472 | __le32 previousAllocExtLocation; |
| 473 | __le32 lengthAllocDescs; | 473 | __le32 lengthAllocDescs; |
| 474 | } __attribute__ ((packed)); | 474 | } __packed; |
| 475 | 475 | ||
| 476 | /* ICB Tag (ECMA 167r3 4/14.6) */ | 476 | /* ICB Tag (ECMA 167r3 4/14.6) */ |
| 477 | struct icbtag { | 477 | struct icbtag { |
| @@ -483,7 +483,7 @@ struct icbtag { | |||
| 483 | uint8_t fileType; | 483 | uint8_t fileType; |
| 484 | struct lb_addr parentICBLocation; | 484 | struct lb_addr parentICBLocation; |
| 485 | __le16 flags; | 485 | __le16 flags; |
| 486 | } __attribute__ ((packed)); | 486 | } __packed; |
| 487 | 487 | ||
| 488 | /* Strategy Type (ECMA 167r3 4/14.6.2) */ | 488 | /* Strategy Type (ECMA 167r3 4/14.6.2) */ |
| 489 | #define ICBTAG_STRATEGY_TYPE_UNDEF 0x0000 | 489 | #define ICBTAG_STRATEGY_TYPE_UNDEF 0x0000 |
| @@ -531,13 +531,13 @@ struct indirectEntry { | |||
| 531 | struct tag descTag; | 531 | struct tag descTag; |
| 532 | struct icbtag icbTag; | 532 | struct icbtag icbTag; |
| 533 | struct long_ad indirectICB; | 533 | struct long_ad indirectICB; |
| 534 | } __attribute__ ((packed)); | 534 | } __packed; |
| 535 | 535 | ||
| 536 | /* Terminal Entry (ECMA 167r3 4/14.8) */ | 536 | /* Terminal Entry (ECMA 167r3 4/14.8) */ |
| 537 | struct terminalEntry { | 537 | struct terminalEntry { |
| 538 | struct tag descTag; | 538 | struct tag descTag; |
| 539 | struct icbtag icbTag; | 539 | struct icbtag icbTag; |
| 540 | } __attribute__ ((packed)); | 540 | } __packed; |
| 541 | 541 | ||
| 542 | /* File Entry (ECMA 167r3 4/14.9) */ | 542 | /* File Entry (ECMA 167r3 4/14.9) */ |
| 543 | struct fileEntry { | 543 | struct fileEntry { |
| @@ -563,7 +563,7 @@ struct fileEntry { | |||
| 563 | __le32 lengthAllocDescs; | 563 | __le32 lengthAllocDescs; |
| 564 | uint8_t extendedAttr[0]; | 564 | uint8_t extendedAttr[0]; |
| 565 | uint8_t allocDescs[0]; | 565 | uint8_t allocDescs[0]; |
| 566 | } __attribute__ ((packed)); | 566 | } __packed; |
| 567 | 567 | ||
| 568 | /* Permissions (ECMA 167r3 4/14.9.5) */ | 568 | /* Permissions (ECMA 167r3 4/14.9.5) */ |
| 569 | #define FE_PERM_O_EXEC 0x00000001U | 569 | #define FE_PERM_O_EXEC 0x00000001U |
| @@ -607,7 +607,7 @@ struct extendedAttrHeaderDesc { | |||
| 607 | struct tag descTag; | 607 | struct tag descTag; |
| 608 | __le32 impAttrLocation; | 608 | __le32 impAttrLocation; |
| 609 | __le32 appAttrLocation; | 609 | __le32 appAttrLocation; |
| 610 | } __attribute__ ((packed)); | 610 | } __packed; |
| 611 | 611 | ||
| 612 | /* Generic Format (ECMA 167r3 4/14.10.2) */ | 612 | /* Generic Format (ECMA 167r3 4/14.10.2) */ |
| 613 | struct genericFormat { | 613 | struct genericFormat { |
| @@ -616,7 +616,7 @@ struct genericFormat { | |||
| 616 | uint8_t reserved[3]; | 616 | uint8_t reserved[3]; |
| 617 | __le32 attrLength; | 617 | __le32 attrLength; |
| 618 | uint8_t attrData[0]; | 618 | uint8_t attrData[0]; |
| 619 | } __attribute__ ((packed)); | 619 | } __packed; |
| 620 | 620 | ||
| 621 | /* Character Set Information (ECMA 167r3 4/14.10.3) */ | 621 | /* Character Set Information (ECMA 167r3 4/14.10.3) */ |
| 622 | struct charSetInfo { | 622 | struct charSetInfo { |
| @@ -627,7 +627,7 @@ struct charSetInfo { | |||
| 627 | __le32 escapeSeqLength; | 627 | __le32 escapeSeqLength; |
| 628 | uint8_t charSetType; | 628 | uint8_t charSetType; |
| 629 | uint8_t escapeSeq[0]; | 629 | uint8_t escapeSeq[0]; |
| 630 | } __attribute__ ((packed)); | 630 | } __packed; |
| 631 | 631 | ||
| 632 | /* Alternate Permissions (ECMA 167r3 4/14.10.4) */ | 632 | /* Alternate Permissions (ECMA 167r3 4/14.10.4) */ |
| 633 | struct altPerms { | 633 | struct altPerms { |
| @@ -638,7 +638,7 @@ struct altPerms { | |||
| 638 | __le16 ownerIdent; | 638 | __le16 ownerIdent; |
| 639 | __le16 groupIdent; | 639 | __le16 groupIdent; |
| 640 | __le16 permission; | 640 | __le16 permission; |
| 641 | } __attribute__ ((packed)); | 641 | } __packed; |
| 642 | 642 | ||
| 643 | /* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */ | 643 | /* File Times Extended Attribute (ECMA 167r3 4/14.10.5) */ |
| 644 | struct fileTimesExtAttr { | 644 | struct fileTimesExtAttr { |
| @@ -649,7 +649,7 @@ struct fileTimesExtAttr { | |||
| 649 | __le32 dataLength; | 649 | __le32 dataLength; |
| 650 | __le32 fileTimeExistence; | 650 | __le32 fileTimeExistence; |
| 651 | uint8_t fileTimes; | 651 | uint8_t fileTimes; |
| 652 | } __attribute__ ((packed)); | 652 | } __packed; |
| 653 | 653 | ||
| 654 | /* FileTimeExistence (ECMA 167r3 4/14.10.5.6) */ | 654 | /* FileTimeExistence (ECMA 167r3 4/14.10.5.6) */ |
| 655 | #define FTE_CREATION 0x00000001 | 655 | #define FTE_CREATION 0x00000001 |
| @@ -666,7 +666,7 @@ struct infoTimesExtAttr { | |||
| 666 | __le32 dataLength; | 666 | __le32 dataLength; |
| 667 | __le32 infoTimeExistence; | 667 | __le32 infoTimeExistence; |
| 668 | uint8_t infoTimes[0]; | 668 | uint8_t infoTimes[0]; |
| 669 | } __attribute__ ((packed)); | 669 | } __packed; |
| 670 | 670 | ||
| 671 | /* Device Specification (ECMA 167r3 4/14.10.7) */ | 671 | /* Device Specification (ECMA 167r3 4/14.10.7) */ |
| 672 | struct deviceSpec { | 672 | struct deviceSpec { |
| @@ -678,7 +678,7 @@ struct deviceSpec { | |||
| 678 | __le32 majorDeviceIdent; | 678 | __le32 majorDeviceIdent; |
| 679 | __le32 minorDeviceIdent; | 679 | __le32 minorDeviceIdent; |
| 680 | uint8_t impUse[0]; | 680 | uint8_t impUse[0]; |
| 681 | } __attribute__ ((packed)); | 681 | } __packed; |
| 682 | 682 | ||
| 683 | /* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */ | 683 | /* Implementation Use Extended Attr (ECMA 167r3 4/14.10.8) */ |
| 684 | struct impUseExtAttr { | 684 | struct impUseExtAttr { |
| @@ -689,7 +689,7 @@ struct impUseExtAttr { | |||
| 689 | __le32 impUseLength; | 689 | __le32 impUseLength; |
| 690 | struct regid impIdent; | 690 | struct regid impIdent; |
| 691 | uint8_t impUse[0]; | 691 | uint8_t impUse[0]; |
| 692 | } __attribute__ ((packed)); | 692 | } __packed; |
| 693 | 693 | ||
| 694 | /* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */ | 694 | /* Application Use Extended Attribute (ECMA 167r3 4/14.10.9) */ |
| 695 | struct appUseExtAttr { | 695 | struct appUseExtAttr { |
| @@ -700,7 +700,7 @@ struct appUseExtAttr { | |||
| 700 | __le32 appUseLength; | 700 | __le32 appUseLength; |
| 701 | struct regid appIdent; | 701 | struct regid appIdent; |
| 702 | uint8_t appUse[0]; | 702 | uint8_t appUse[0]; |
| 703 | } __attribute__ ((packed)); | 703 | } __packed; |
| 704 | 704 | ||
| 705 | #define EXTATTR_CHAR_SET 1 | 705 | #define EXTATTR_CHAR_SET 1 |
| 706 | #define EXTATTR_ALT_PERMS 3 | 706 | #define EXTATTR_ALT_PERMS 3 |
| @@ -716,7 +716,7 @@ struct unallocSpaceEntry { | |||
| 716 | struct icbtag icbTag; | 716 | struct icbtag icbTag; |
| 717 | __le32 lengthAllocDescs; | 717 | __le32 lengthAllocDescs; |
| 718 | uint8_t allocDescs[0]; | 718 | uint8_t allocDescs[0]; |
| 719 | } __attribute__ ((packed)); | 719 | } __packed; |
| 720 | 720 | ||
| 721 | /* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */ | 721 | /* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */ |
| 722 | struct spaceBitmapDesc { | 722 | struct spaceBitmapDesc { |
| @@ -724,7 +724,7 @@ struct spaceBitmapDesc { | |||
| 724 | __le32 numOfBits; | 724 | __le32 numOfBits; |
| 725 | __le32 numOfBytes; | 725 | __le32 numOfBytes; |
| 726 | uint8_t bitmap[0]; | 726 | uint8_t bitmap[0]; |
| 727 | } __attribute__ ((packed)); | 727 | } __packed; |
| 728 | 728 | ||
| 729 | /* Partition Integrity Entry (ECMA 167r3 4/14.13) */ | 729 | /* Partition Integrity Entry (ECMA 167r3 4/14.13) */ |
| 730 | struct partitionIntegrityEntry { | 730 | struct partitionIntegrityEntry { |
| @@ -735,7 +735,7 @@ struct partitionIntegrityEntry { | |||
| 735 | uint8_t reserved[175]; | 735 | uint8_t reserved[175]; |
| 736 | struct regid impIdent; | 736 | struct regid impIdent; |
| 737 | uint8_t impUse[256]; | 737 | uint8_t impUse[256]; |
| 738 | } __attribute__ ((packed)); | 738 | } __packed; |
| 739 | 739 | ||
| 740 | /* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */ | 740 | /* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */ |
| 741 | 741 | ||
| @@ -753,7 +753,7 @@ struct partitionIntegrityEntry { | |||
| 753 | struct logicalVolHeaderDesc { | 753 | struct logicalVolHeaderDesc { |
| 754 | __le64 uniqueID; | 754 | __le64 uniqueID; |
| 755 | uint8_t reserved[24]; | 755 | uint8_t reserved[24]; |
| 756 | } __attribute__ ((packed)); | 756 | } __packed; |
| 757 | 757 | ||
| 758 | /* Path Component (ECMA 167r3 4/14.16.1) */ | 758 | /* Path Component (ECMA 167r3 4/14.16.1) */ |
| 759 | struct pathComponent { | 759 | struct pathComponent { |
| @@ -761,7 +761,7 @@ struct pathComponent { | |||
| 761 | uint8_t lengthComponentIdent; | 761 | uint8_t lengthComponentIdent; |
| 762 | __le16 componentFileVersionNum; | 762 | __le16 componentFileVersionNum; |
| 763 | dstring componentIdent[0]; | 763 | dstring componentIdent[0]; |
| 764 | } __attribute__ ((packed)); | 764 | } __packed; |
| 765 | 765 | ||
| 766 | /* File Entry (ECMA 167r3 4/14.17) */ | 766 | /* File Entry (ECMA 167r3 4/14.17) */ |
| 767 | struct extendedFileEntry { | 767 | struct extendedFileEntry { |
| @@ -791,6 +791,6 @@ struct extendedFileEntry { | |||
| 791 | __le32 lengthAllocDescs; | 791 | __le32 lengthAllocDescs; |
| 792 | uint8_t extendedAttr[0]; | 792 | uint8_t extendedAttr[0]; |
| 793 | uint8_t allocDescs[0]; | 793 | uint8_t allocDescs[0]; |
| 794 | } __attribute__ ((packed)); | 794 | } __packed; |
| 795 | 795 | ||
| 796 | #endif /* _ECMA_167_H */ | 796 | #endif /* _ECMA_167_H */ |
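
Note: the ecma_167.h changes above are a mechanical conversion of the open-coded __attribute__ ((packed)) annotations to the kernel's __packed shorthand; the generated layout is unchanged. As a reminder of why these on-disk descriptors must stay packed, here is a small user-space sketch (struct name hypothetical, mirroring lb_addr): without the attribute the compiler would pad the 6-byte logical block address out to 8 bytes and every ECMA-167 offset after it would shift.

#include <stdint.h>
#include <assert.h>

#define __packed __attribute__((packed))	/* same expansion the kernel header uses */

/* Mirrors struct lb_addr: a 32-bit block number plus a 16-bit partition ref. */
struct demo_lb_addr {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
} __packed;

/* ECMA-167 fixes this group at 6 bytes; without __packed the compiler would
 * round the struct up to 8 and misalign every following on-disk field. */
static_assert(sizeof(struct demo_lb_addr) == 6, "on-disk layout must be 6 bytes");
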
diff --git a/fs/udf/file.c b/fs/udf/file.c index dbcb3a4a0cb9..e04cc0cdca9d 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
| @@ -176,54 +176,46 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 176 | { | 176 | { |
| 177 | struct inode *inode = file_inode(filp); | 177 | struct inode *inode = file_inode(filp); |
| 178 | long old_block, new_block; | 178 | long old_block, new_block; |
| 179 | int result = -EINVAL; | 179 | int result; |
| 180 | 180 | ||
| 181 | if (inode_permission(inode, MAY_READ) != 0) { | 181 | if (inode_permission(inode, MAY_READ) != 0) { |
| 182 | udf_debug("no permission to access inode %lu\n", inode->i_ino); | 182 | udf_debug("no permission to access inode %lu\n", inode->i_ino); |
| 183 | result = -EPERM; | 183 | return -EPERM; |
| 184 | goto out; | ||
| 185 | } | 184 | } |
| 186 | 185 | ||
| 187 | if (!arg) { | 186 | if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) || |
| 187 | (cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) { | ||
| 188 | udf_debug("invalid argument to udf_ioctl\n"); | 188 | udf_debug("invalid argument to udf_ioctl\n"); |
| 189 | result = -EINVAL; | 189 | return -EINVAL; |
| 190 | goto out; | ||
| 191 | } | 190 | } |
| 192 | 191 | ||
| 193 | switch (cmd) { | 192 | switch (cmd) { |
| 194 | case UDF_GETVOLIDENT: | 193 | case UDF_GETVOLIDENT: |
| 195 | if (copy_to_user((char __user *)arg, | 194 | if (copy_to_user((char __user *)arg, |
| 196 | UDF_SB(inode->i_sb)->s_volume_ident, 32)) | 195 | UDF_SB(inode->i_sb)->s_volume_ident, 32)) |
| 197 | result = -EFAULT; | 196 | return -EFAULT; |
| 198 | else | 197 | return 0; |
| 199 | result = 0; | ||
| 200 | goto out; | ||
| 201 | case UDF_RELOCATE_BLOCKS: | 198 | case UDF_RELOCATE_BLOCKS: |
| 202 | if (!capable(CAP_SYS_ADMIN)) { | 199 | if (!capable(CAP_SYS_ADMIN)) |
| 203 | result = -EPERM; | 200 | return -EPERM; |
| 204 | goto out; | 201 | if (get_user(old_block, (long __user *)arg)) |
| 205 | } | 202 | return -EFAULT; |
| 206 | if (get_user(old_block, (long __user *)arg)) { | ||
| 207 | result = -EFAULT; | ||
| 208 | goto out; | ||
| 209 | } | ||
| 210 | result = udf_relocate_blocks(inode->i_sb, | 203 | result = udf_relocate_blocks(inode->i_sb, |
| 211 | old_block, &new_block); | 204 | old_block, &new_block); |
| 212 | if (result == 0) | 205 | if (result == 0) |
| 213 | result = put_user(new_block, (long __user *)arg); | 206 | result = put_user(new_block, (long __user *)arg); |
| 214 | goto out; | 207 | return result; |
| 215 | case UDF_GETEASIZE: | 208 | case UDF_GETEASIZE: |
| 216 | result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg); | 209 | return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg); |
| 217 | goto out; | ||
| 218 | case UDF_GETEABLOCK: | 210 | case UDF_GETEABLOCK: |
| 219 | result = copy_to_user((char __user *)arg, | 211 | return copy_to_user((char __user *)arg, |
| 220 | UDF_I(inode)->i_ext.i_data, | 212 | UDF_I(inode)->i_ext.i_data, |
| 221 | UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0; | 213 | UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0; |
| 222 | goto out; | 214 | default: |
| 215 | return -ENOIOCTLCMD; | ||
| 223 | } | 216 | } |
| 224 | 217 | ||
| 225 | out: | 218 | return 0; |
| 226 | return result; | ||
| 227 | } | 219 | } |
| 228 | 220 | ||
| 229 | static int udf_release_file(struct inode *inode, struct file *filp) | 221 | static int udf_release_file(struct inode *inode, struct file *filp) |
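
Note: the udf_ioctl() rework above replaces the result/goto-out pattern with direct returns, restricts the NULL-argument check to the four commands that actually dereference arg, and returns -ENOIOCTLCMD for anything unrecognized so the VFS can report an unsupported ioctl. A condensed sketch of that dispatch shape, with hypothetical command names, context type and copy-out helpers standing in for the UDF ones:

/* Sketch only: DEMO_*, struct demo_ctx and copy_out_*() are hypothetical. */
static long demo_ioctl(struct demo_ctx *ctx, unsigned int cmd, unsigned long arg)
{
	/* Only commands that copy data through 'arg' require a pointer. */
	if (!arg && (cmd == DEMO_GET_IDENT || cmd == DEMO_GET_EA))
		return -EINVAL;

	switch (cmd) {
	case DEMO_GET_IDENT:
		return copy_out_ident(ctx, (void __user *)arg) ? -EFAULT : 0;
	case DEMO_GET_EA:
		return copy_out_ea(ctx, (void __user *)arg) ? -EFAULT : 0;
	default:
		return -ENOIOCTLCMD;	/* the VFS turns this into ENOTTY */
	}
}
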
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 0f3db71753aa..8ec6b3df0bc7 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
| @@ -43,10 +43,6 @@ | |||
| 43 | #include "udf_i.h" | 43 | #include "udf_i.h" |
| 44 | #include "udf_sb.h" | 44 | #include "udf_sb.h" |
| 45 | 45 | ||
| 46 | MODULE_AUTHOR("Ben Fennema"); | ||
| 47 | MODULE_DESCRIPTION("Universal Disk Format Filesystem"); | ||
| 48 | MODULE_LICENSE("GPL"); | ||
| 49 | |||
| 50 | #define EXTENT_MERGE_SIZE 5 | 46 | #define EXTENT_MERGE_SIZE 5 |
| 51 | 47 | ||
| 52 | static umode_t udf_convert_permissions(struct fileEntry *); | 48 | static umode_t udf_convert_permissions(struct fileEntry *); |
| @@ -57,14 +53,12 @@ static sector_t inode_getblk(struct inode *, sector_t, int *, int *); | |||
| 57 | static int8_t udf_insert_aext(struct inode *, struct extent_position, | 53 | static int8_t udf_insert_aext(struct inode *, struct extent_position, |
| 58 | struct kernel_lb_addr, uint32_t); | 54 | struct kernel_lb_addr, uint32_t); |
| 59 | static void udf_split_extents(struct inode *, int *, int, int, | 55 | static void udf_split_extents(struct inode *, int *, int, int, |
| 60 | struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); | 56 | struct kernel_long_ad *, int *); |
| 61 | static void udf_prealloc_extents(struct inode *, int, int, | 57 | static void udf_prealloc_extents(struct inode *, int, int, |
| 62 | struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); | 58 | struct kernel_long_ad *, int *); |
| 63 | static void udf_merge_extents(struct inode *, | 59 | static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *); |
| 64 | struct kernel_long_ad[EXTENT_MERGE_SIZE], int *); | 60 | static void udf_update_extents(struct inode *, struct kernel_long_ad *, int, |
| 65 | static void udf_update_extents(struct inode *, | 61 | int, struct extent_position *); |
| 66 | struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int, | ||
| 67 | struct extent_position *); | ||
| 68 | static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); | 62 | static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); |
| 69 | 63 | ||
| 70 | static void __udf_clear_extent_cache(struct inode *inode) | 64 | static void __udf_clear_extent_cache(struct inode *inode) |
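
Note: the prototype changes above (and the matching definitions further down) replace the struct kernel_long_ad laarr[EXTENT_MERGE_SIZE] parameters with plain pointers. In C a sized array in a parameter list is adjusted to a pointer anyway, so this is purely cosmetic; the bound in the old declaration documented intent but was never enforced. A standalone illustration:

#include <stdio.h>

/* To the compiler these two declarations have the identical type:
 * the array bound in a parameter list is discarded. */
void show(int laarr[5]);
void show(int *laarr);

void show(int *laarr)
{
	printf("%d\n", laarr[0]);
}

int main(void)
{
	int two[2] = { 7, 8 };

	show(two);	/* compiles and runs: no bound of 5 is enforced */
	return 0;
}
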
| @@ -111,7 +105,7 @@ static int udf_read_extent_cache(struct inode *inode, loff_t bcount, | |||
| 111 | 105 | ||
| 112 | /* Add extent to extent cache */ | 106 | /* Add extent to extent cache */ |
| 113 | static void udf_update_extent_cache(struct inode *inode, loff_t estart, | 107 | static void udf_update_extent_cache(struct inode *inode, loff_t estart, |
| 114 | struct extent_position *pos, int next_epos) | 108 | struct extent_position *pos) |
| 115 | { | 109 | { |
| 116 | struct udf_inode_info *iinfo = UDF_I(inode); | 110 | struct udf_inode_info *iinfo = UDF_I(inode); |
| 117 | 111 | ||
| @@ -120,19 +114,16 @@ static void udf_update_extent_cache(struct inode *inode, loff_t estart, | |||
| 120 | __udf_clear_extent_cache(inode); | 114 | __udf_clear_extent_cache(inode); |
| 121 | if (pos->bh) | 115 | if (pos->bh) |
| 122 | get_bh(pos->bh); | 116 | get_bh(pos->bh); |
| 123 | memcpy(&iinfo->cached_extent.epos, pos, | 117 | memcpy(&iinfo->cached_extent.epos, pos, sizeof(struct extent_position)); |
| 124 | sizeof(struct extent_position)); | ||
| 125 | iinfo->cached_extent.lstart = estart; | 118 | iinfo->cached_extent.lstart = estart; |
| 126 | if (next_epos) | 119 | switch (iinfo->i_alloc_type) { |
| 127 | switch (iinfo->i_alloc_type) { | 120 | case ICBTAG_FLAG_AD_SHORT: |
| 128 | case ICBTAG_FLAG_AD_SHORT: | 121 | iinfo->cached_extent.epos.offset -= sizeof(struct short_ad); |
| 129 | iinfo->cached_extent.epos.offset -= | 122 | break; |
| 130 | sizeof(struct short_ad); | 123 | case ICBTAG_FLAG_AD_LONG: |
| 131 | break; | 124 | iinfo->cached_extent.epos.offset -= sizeof(struct long_ad); |
| 132 | case ICBTAG_FLAG_AD_LONG: | 125 | break; |
| 133 | iinfo->cached_extent.epos.offset -= | 126 | } |
| 134 | sizeof(struct long_ad); | ||
| 135 | } | ||
| 136 | spin_unlock(&iinfo->i_extent_cache_lock); | 127 | spin_unlock(&iinfo->i_extent_cache_lock); |
| 137 | } | 128 | } |
| 138 | 129 | ||
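
Note: the only caller of udf_update_extent_cache() passed next_epos = 1, so the hunk above drops the flag and always rewinds epos.offset by one allocation descriptor, leaving the cached position pointing at the descriptor of the cached extent rather than at the one after it. The amount rewound depends on the inode's allocation type, roughly as in this kernel-context sketch (helper name hypothetical, not standalone):

/* Size of one on-disk allocation descriptor per ICB allocation type. */
static size_t adesc_size(unsigned int alloc_type)
{
	switch (alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		return sizeof(struct short_ad);	/* 8 bytes on disk */
	case ICBTAG_FLAG_AD_LONG:
		return sizeof(struct long_ad);	/* 16 bytes on disk */
	default:
		return 0;			/* e.g. in-ICB data: nothing to rewind */
	}
}
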
| @@ -747,11 +738,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, | |||
| 747 | ~(inode->i_sb->s_blocksize - 1)); | 738 | ~(inode->i_sb->s_blocksize - 1)); |
| 748 | udf_write_aext(inode, &cur_epos, &eloc, elen, 1); | 739 | udf_write_aext(inode, &cur_epos, &eloc, elen, 1); |
| 749 | } | 740 | } |
| 750 | brelse(prev_epos.bh); | ||
| 751 | brelse(cur_epos.bh); | ||
| 752 | brelse(next_epos.bh); | ||
| 753 | newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset); | 741 | newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset); |
| 754 | return newblock; | 742 | goto out_free; |
| 755 | } | 743 | } |
| 756 | 744 | ||
| 757 | /* Are we beyond EOF? */ | 745 | /* Are we beyond EOF? */ |
| @@ -774,11 +762,9 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, | |||
| 774 | /* Create extents for the hole between EOF and offset */ | 762 | /* Create extents for the hole between EOF and offset */ |
| 775 | ret = udf_do_extend_file(inode, &prev_epos, laarr, offset); | 763 | ret = udf_do_extend_file(inode, &prev_epos, laarr, offset); |
| 776 | if (ret < 0) { | 764 | if (ret < 0) { |
| 777 | brelse(prev_epos.bh); | ||
| 778 | brelse(cur_epos.bh); | ||
| 779 | brelse(next_epos.bh); | ||
| 780 | *err = ret; | 765 | *err = ret; |
| 781 | return 0; | 766 | newblock = 0; |
| 767 | goto out_free; | ||
| 782 | } | 768 | } |
| 783 | c = 0; | 769 | c = 0; |
| 784 | offset = 0; | 770 | offset = 0; |
| @@ -841,11 +827,9 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, | |||
| 841 | iinfo->i_location.partitionReferenceNum, | 827 | iinfo->i_location.partitionReferenceNum, |
| 842 | goal, err); | 828 | goal, err); |
| 843 | if (!newblocknum) { | 829 | if (!newblocknum) { |
| 844 | brelse(prev_epos.bh); | ||
| 845 | brelse(cur_epos.bh); | ||
| 846 | brelse(next_epos.bh); | ||
| 847 | *err = -ENOSPC; | 830 | *err = -ENOSPC; |
| 848 | return 0; | 831 | newblock = 0; |
| 832 | goto out_free; | ||
| 849 | } | 833 | } |
| 850 | if (isBeyondEOF) | 834 | if (isBeyondEOF) |
| 851 | iinfo->i_lenExtents += inode->i_sb->s_blocksize; | 835 | iinfo->i_lenExtents += inode->i_sb->s_blocksize; |
| @@ -857,14 +841,12 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, | |||
| 857 | * block */ | 841 | * block */ |
| 858 | udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); | 842 | udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); |
| 859 | 843 | ||
| 860 | #ifdef UDF_PREALLOCATE | ||
| 861 | /* We preallocate blocks only for regular files. It also makes sense | 844 | /* We preallocate blocks only for regular files. It also makes sense |
| 862 | * for directories but there's a problem when to drop the | 845 | * for directories but there's a problem when to drop the |
| 863 | * preallocation. We might use some delayed work for that but I feel | 846 | * preallocation. We might use some delayed work for that but I feel |
| 864 | * it's overengineering for a filesystem like UDF. */ | 847 | * it's overengineering for a filesystem like UDF. */ |
| 865 | if (S_ISREG(inode->i_mode)) | 848 | if (S_ISREG(inode->i_mode)) |
| 866 | udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); | 849 | udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); |
| 867 | #endif | ||
| 868 | 850 | ||
| 869 | /* merge any continuous blocks in laarr */ | 851 | /* merge any continuous blocks in laarr */ |
| 870 | udf_merge_extents(inode, laarr, &endnum); | 852 | udf_merge_extents(inode, laarr, &endnum); |
| @@ -874,15 +856,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, | |||
| 874 | * the new number of extents is less than the old number */ | 856 | * the new number of extents is less than the old number */ |
| 875 | udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); | 857 | udf_update_extents(inode, laarr, startnum, endnum, &prev_epos); |
| 876 | 858 | ||
| 877 | brelse(prev_epos.bh); | ||
| 878 | brelse(cur_epos.bh); | ||
| 879 | brelse(next_epos.bh); | ||
| 880 | |||
| 881 | newblock = udf_get_pblock(inode->i_sb, newblocknum, | 859 | newblock = udf_get_pblock(inode->i_sb, newblocknum, |
| 882 | iinfo->i_location.partitionReferenceNum, 0); | 860 | iinfo->i_location.partitionReferenceNum, 0); |
| 883 | if (!newblock) { | 861 | if (!newblock) { |
| 884 | *err = -EIO; | 862 | *err = -EIO; |
| 885 | return 0; | 863 | goto out_free; |
| 886 | } | 864 | } |
| 887 | *new = 1; | 865 | *new = 1; |
| 888 | iinfo->i_next_alloc_block = block; | 866 | iinfo->i_next_alloc_block = block; |
| @@ -893,13 +871,15 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, | |||
| 893 | udf_sync_inode(inode); | 871 | udf_sync_inode(inode); |
| 894 | else | 872 | else |
| 895 | mark_inode_dirty(inode); | 873 | mark_inode_dirty(inode); |
| 896 | 874 | out_free: | |
| 875 | brelse(prev_epos.bh); | ||
| 876 | brelse(cur_epos.bh); | ||
| 877 | brelse(next_epos.bh); | ||
| 897 | return newblock; | 878 | return newblock; |
| 898 | } | 879 | } |
| 899 | 880 | ||
| 900 | static void udf_split_extents(struct inode *inode, int *c, int offset, | 881 | static void udf_split_extents(struct inode *inode, int *c, int offset, |
| 901 | int newblocknum, | 882 | int newblocknum, struct kernel_long_ad *laarr, |
| 902 | struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], | ||
| 903 | int *endnum) | 883 | int *endnum) |
| 904 | { | 884 | { |
| 905 | unsigned long blocksize = inode->i_sb->s_blocksize; | 885 | unsigned long blocksize = inode->i_sb->s_blocksize; |
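
Note: inode_getblk() previously repeated the same three brelse() calls on every early exit; the hunks above route those exits through a single out_free label so the buffer-head references are dropped in exactly one place (brelse(NULL) is a no-op, which is what makes the shared label safe). The general shape, reduced to a kernel-context sketch with hypothetical work steps, not compilable as-is:

static sector_t demo_getblk(struct super_block *sb, int *err)
{
	struct buffer_head *bh1 = NULL, *bh2 = NULL, *bh3 = NULL;
	sector_t ret = 0;

	/* ... walk extents, possibly taking references into bh1..bh3 ... */

	if (out_of_space) {			/* hypothetical condition */
		*err = -ENOSPC;
		goto out_free;			/* no per-path brelse() triplets */
	}

	ret = computed_block;			/* hypothetical result */

out_free:
	brelse(bh1);				/* brelse(NULL) is a no-op */
	brelse(bh2);
	brelse(bh3);
	return ret;
}
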
| @@ -963,7 +943,7 @@ static void udf_split_extents(struct inode *inode, int *c, int offset, | |||
| 963 | } | 943 | } |
| 964 | 944 | ||
| 965 | static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, | 945 | static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, |
| 966 | struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], | 946 | struct kernel_long_ad *laarr, |
| 967 | int *endnum) | 947 | int *endnum) |
| 968 | { | 948 | { |
| 969 | int start, length = 0, currlength = 0, i; | 949 | int start, length = 0, currlength = 0, i; |
| @@ -1058,8 +1038,7 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, | |||
| 1058 | } | 1038 | } |
| 1059 | } | 1039 | } |
| 1060 | 1040 | ||
| 1061 | static void udf_merge_extents(struct inode *inode, | 1041 | static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr, |
| 1062 | struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], | ||
| 1063 | int *endnum) | 1042 | int *endnum) |
| 1064 | { | 1043 | { |
| 1065 | int i; | 1044 | int i; |
| @@ -1158,8 +1137,7 @@ static void udf_merge_extents(struct inode *inode, | |||
| 1158 | } | 1137 | } |
| 1159 | } | 1138 | } |
| 1160 | 1139 | ||
| 1161 | static void udf_update_extents(struct inode *inode, | 1140 | static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr, |
| 1162 | struct kernel_long_ad laarr[EXTENT_MERGE_SIZE], | ||
| 1163 | int startnum, int endnum, | 1141 | int startnum, int endnum, |
| 1164 | struct extent_position *epos) | 1142 | struct extent_position *epos) |
| 1165 | { | 1143 | { |
| @@ -1299,6 +1277,12 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode) | |||
| 1299 | int ret = -EIO; | 1277 | int ret = -EIO; |
| 1300 | 1278 | ||
| 1301 | reread: | 1279 | reread: |
| 1280 | if (iloc->partitionReferenceNum >= sbi->s_partitions) { | ||
| 1281 | udf_debug("partition reference: %d > logical volume partitions: %d\n", | ||
| 1282 | iloc->partitionReferenceNum, sbi->s_partitions); | ||
| 1283 | return -EIO; | ||
| 1284 | } | ||
| 1285 | |||
| 1302 | if (iloc->logicalBlockNum >= | 1286 | if (iloc->logicalBlockNum >= |
| 1303 | sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) { | 1287 | sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) { |
| 1304 | udf_debug("block=%d, partition=%d out of range\n", | 1288 | udf_debug("block=%d, partition=%d out of range\n", |
| @@ -1549,7 +1533,7 @@ reread: | |||
| 1549 | break; | 1533 | break; |
| 1550 | case ICBTAG_FILE_TYPE_SYMLINK: | 1534 | case ICBTAG_FILE_TYPE_SYMLINK: |
| 1551 | inode->i_data.a_ops = &udf_symlink_aops; | 1535 | inode->i_data.a_ops = &udf_symlink_aops; |
| 1552 | inode->i_op = &page_symlink_inode_operations; | 1536 | inode->i_op = &udf_symlink_inode_operations; |
| 1553 | inode_nohighmem(inode); | 1537 | inode_nohighmem(inode); |
| 1554 | inode->i_mode = S_IFLNK | S_IRWXUGO; | 1538 | inode->i_mode = S_IFLNK | S_IRWXUGO; |
| 1555 | break; | 1539 | break; |
| @@ -1627,6 +1611,14 @@ static int udf_sync_inode(struct inode *inode) | |||
| 1627 | return udf_update_inode(inode, 1); | 1611 | return udf_update_inode(inode, 1); |
| 1628 | } | 1612 | } |
| 1629 | 1613 | ||
| 1614 | static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec time) | ||
| 1615 | { | ||
| 1616 | if (iinfo->i_crtime.tv_sec > time.tv_sec || | ||
| 1617 | (iinfo->i_crtime.tv_sec == time.tv_sec && | ||
| 1618 | iinfo->i_crtime.tv_nsec > time.tv_nsec)) | ||
| 1619 | iinfo->i_crtime = time; | ||
| 1620 | } | ||
| 1621 | |||
| 1630 | static int udf_update_inode(struct inode *inode, int do_sync) | 1622 | static int udf_update_inode(struct inode *inode, int do_sync) |
| 1631 | { | 1623 | { |
| 1632 | struct buffer_head *bh = NULL; | 1624 | struct buffer_head *bh = NULL; |
| @@ -1753,20 +1745,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
| 1753 | efe->objectSize = cpu_to_le64(inode->i_size); | 1745 | efe->objectSize = cpu_to_le64(inode->i_size); |
| 1754 | efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); | 1746 | efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); |
| 1755 | 1747 | ||
| 1756 | if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec || | 1748 | udf_adjust_time(iinfo, inode->i_atime); |
| 1757 | (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec && | 1749 | udf_adjust_time(iinfo, inode->i_mtime); |
| 1758 | iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec)) | 1750 | udf_adjust_time(iinfo, inode->i_ctime); |
| 1759 | iinfo->i_crtime = inode->i_atime; | ||
| 1760 | |||
| 1761 | if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec || | ||
| 1762 | (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec && | ||
| 1763 | iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec)) | ||
| 1764 | iinfo->i_crtime = inode->i_mtime; | ||
| 1765 | |||
| 1766 | if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec || | ||
| 1767 | (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec && | ||
| 1768 | iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec)) | ||
| 1769 | iinfo->i_crtime = inode->i_ctime; | ||
| 1770 | 1751 | ||
| 1771 | udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime); | 1752 | udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime); |
| 1772 | udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime); | 1753 | udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime); |
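
Note: the two hunks above replace three copy-pasted "is the recorded creation time later than this timestamp?" blocks with the new udf_adjust_time() helper, so i_crtime ends up as the earliest of the creation, access, modification and change times. The comparison itself, as a standalone sketch on struct timespec (helper name hypothetical):

#include <time.h>

/* Pull 'crtime' back to 't' whenever 't' is earlier; calling this once per
 * timestamp leaves crtime at the minimum of everything it was compared to. */
static void adjust_crtime(struct timespec *crtime, struct timespec t)
{
	if (crtime->tv_sec > t.tv_sec ||
	    (crtime->tv_sec == t.tv_sec && crtime->tv_nsec > t.tv_nsec))
		*crtime = t;
}
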
| @@ -2286,8 +2267,7 @@ int8_t inode_bmap(struct inode *inode, sector_t block, | |||
| 2286 | uint32_t *elen, sector_t *offset) | 2267 | uint32_t *elen, sector_t *offset) |
| 2287 | { | 2268 | { |
| 2288 | unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; | 2269 | unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; |
| 2289 | loff_t lbcount = 0, bcount = | 2270 | loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits; |
| 2290 | (loff_t) block << blocksize_bits; | ||
| 2291 | int8_t etype; | 2271 | int8_t etype; |
| 2292 | struct udf_inode_info *iinfo; | 2272 | struct udf_inode_info *iinfo; |
| 2293 | 2273 | ||
| @@ -2308,7 +2288,7 @@ int8_t inode_bmap(struct inode *inode, sector_t block, | |||
| 2308 | lbcount += *elen; | 2288 | lbcount += *elen; |
| 2309 | } while (lbcount <= bcount); | 2289 | } while (lbcount <= bcount); |
| 2310 | /* update extent cache */ | 2290 | /* update extent cache */ |
| 2311 | udf_update_extent_cache(inode, lbcount - *elen, pos, 1); | 2291 | udf_update_extent_cache(inode, lbcount - *elen, pos); |
| 2312 | *offset = (bcount + *elen - lbcount) >> blocksize_bits; | 2292 | *offset = (bcount + *elen - lbcount) >> blocksize_bits; |
| 2313 | 2293 | ||
| 2314 | return etype; | 2294 | return etype; |
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c index 6ad5a453af97..5c7ec121990d 100644 --- a/fs/udf/lowlevel.c +++ b/fs/udf/lowlevel.c | |||
| @@ -58,7 +58,7 @@ unsigned long udf_get_last_block(struct super_block *sb) | |||
| 58 | */ | 58 | */ |
| 59 | if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) || | 59 | if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) || |
| 60 | lblock == 0) | 60 | lblock == 0) |
| 61 | lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits; | 61 | lblock = i_size_read(bdev->bd_inode) >> sb->s_blocksize_bits; |
| 62 | 62 | ||
| 63 | if (lblock) | 63 | if (lblock) |
| 64 | return lblock - 1; | 64 | return lblock - 1; |
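
Note: this one-liner (and the matching changes in super.c further down) reads the block device size through i_size_read() instead of dereferencing bd_inode->i_size directly; the accessor guards the read on 32-bit SMP and preempt kernels so a concurrent size update cannot be observed half-written, while compiling down to a plain load on 64-bit. Kernel-context call shape only, not a standalone program:

/* Tear-free read of the device size, then convert to filesystem blocks. */
loff_t bytes = i_size_read(bdev->bd_inode);
sector_t nr_blocks = bytes >> sb->s_blocksize_bits;
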
diff --git a/fs/udf/misc.c b/fs/udf/misc.c index 71d1c25f360d..3949c4bec3a3 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c | |||
| @@ -141,8 +141,6 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size, | |||
| 141 | iinfo->i_lenEAttr += size; | 141 | iinfo->i_lenEAttr += size; |
| 142 | return (struct genericFormat *)&ea[offset]; | 142 | return (struct genericFormat *)&ea[offset]; |
| 143 | } | 143 | } |
| 144 | if (loc & 0x02) | ||
| 145 | ; | ||
| 146 | 144 | ||
| 147 | return NULL; | 145 | return NULL; |
| 148 | } | 146 | } |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 2d65e280748b..babf48d0e553 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
| @@ -931,7 +931,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
| 931 | } | 931 | } |
| 932 | 932 | ||
| 933 | inode->i_data.a_ops = &udf_symlink_aops; | 933 | inode->i_data.a_ops = &udf_symlink_aops; |
| 934 | inode->i_op = &page_symlink_inode_operations; | 934 | inode->i_op = &udf_symlink_inode_operations; |
| 935 | inode_nohighmem(inode); | 935 | inode_nohighmem(inode); |
| 936 | 936 | ||
| 937 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { | 937 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { |
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h index fbff74654df2..a4da59e38b7f 100644 --- a/fs/udf/osta_udf.h +++ b/fs/udf/osta_udf.h | |||
| @@ -70,17 +70,17 @@ struct UDFIdentSuffix { | |||
| 70 | uint8_t OSClass; | 70 | uint8_t OSClass; |
| 71 | uint8_t OSIdentifier; | 71 | uint8_t OSIdentifier; |
| 72 | uint8_t reserved[4]; | 72 | uint8_t reserved[4]; |
| 73 | } __attribute__ ((packed)); | 73 | } __packed; |
| 74 | 74 | ||
| 75 | struct impIdentSuffix { | 75 | struct impIdentSuffix { |
| 76 | uint8_t OSClass; | 76 | uint8_t OSClass; |
| 77 | uint8_t OSIdentifier; | 77 | uint8_t OSIdentifier; |
| 78 | uint8_t reserved[6]; | 78 | uint8_t reserved[6]; |
| 79 | } __attribute__ ((packed)); | 79 | } __packed; |
| 80 | 80 | ||
| 81 | struct appIdentSuffix { | 81 | struct appIdentSuffix { |
| 82 | uint8_t impUse[8]; | 82 | uint8_t impUse[8]; |
| 83 | } __attribute__ ((packed)); | 83 | } __packed; |
| 84 | 84 | ||
| 85 | /* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */ | 85 | /* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */ |
| 86 | /* Implementation Use (UDF 2.50 2.2.6.4) */ | 86 | /* Implementation Use (UDF 2.50 2.2.6.4) */ |
| @@ -92,7 +92,7 @@ struct logicalVolIntegrityDescImpUse { | |||
| 92 | __le16 minUDFWriteRev; | 92 | __le16 minUDFWriteRev; |
| 93 | __le16 maxUDFWriteRev; | 93 | __le16 maxUDFWriteRev; |
| 94 | uint8_t impUse[0]; | 94 | uint8_t impUse[0]; |
| 95 | } __attribute__ ((packed)); | 95 | } __packed; |
| 96 | 96 | ||
| 97 | /* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */ | 97 | /* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */ |
| 98 | /* Implementation Use (UDF 2.50 2.2.7.2) */ | 98 | /* Implementation Use (UDF 2.50 2.2.7.2) */ |
| @@ -104,7 +104,7 @@ struct impUseVolDescImpUse { | |||
| 104 | dstring LVInfo3[36]; | 104 | dstring LVInfo3[36]; |
| 105 | struct regid impIdent; | 105 | struct regid impIdent; |
| 106 | uint8_t impUse[128]; | 106 | uint8_t impUse[128]; |
| 107 | } __attribute__ ((packed)); | 107 | } __packed; |
| 108 | 108 | ||
| 109 | struct udfPartitionMap2 { | 109 | struct udfPartitionMap2 { |
| 110 | uint8_t partitionMapType; | 110 | uint8_t partitionMapType; |
| @@ -113,7 +113,7 @@ struct udfPartitionMap2 { | |||
| 113 | struct regid partIdent; | 113 | struct regid partIdent; |
| 114 | __le16 volSeqNum; | 114 | __le16 volSeqNum; |
| 115 | __le16 partitionNum; | 115 | __le16 partitionNum; |
| 116 | } __attribute__ ((packed)); | 116 | } __packed; |
| 117 | 117 | ||
| 118 | /* Virtual Partition Map (UDF 2.50 2.2.8) */ | 118 | /* Virtual Partition Map (UDF 2.50 2.2.8) */ |
| 119 | struct virtualPartitionMap { | 119 | struct virtualPartitionMap { |
| @@ -124,7 +124,7 @@ struct virtualPartitionMap { | |||
| 124 | __le16 volSeqNum; | 124 | __le16 volSeqNum; |
| 125 | __le16 partitionNum; | 125 | __le16 partitionNum; |
| 126 | uint8_t reserved2[24]; | 126 | uint8_t reserved2[24]; |
| 127 | } __attribute__ ((packed)); | 127 | } __packed; |
| 128 | 128 | ||
| 129 | /* Sparable Partition Map (UDF 2.50 2.2.9) */ | 129 | /* Sparable Partition Map (UDF 2.50 2.2.9) */ |
| 130 | struct sparablePartitionMap { | 130 | struct sparablePartitionMap { |
| @@ -139,7 +139,7 @@ struct sparablePartitionMap { | |||
| 139 | uint8_t reserved2[1]; | 139 | uint8_t reserved2[1]; |
| 140 | __le32 sizeSparingTable; | 140 | __le32 sizeSparingTable; |
| 141 | __le32 locSparingTable[4]; | 141 | __le32 locSparingTable[4]; |
| 142 | } __attribute__ ((packed)); | 142 | } __packed; |
| 143 | 143 | ||
| 144 | /* Metadata Partition Map (UDF 2.4.0 2.2.10) */ | 144 | /* Metadata Partition Map (UDF 2.4.0 2.2.10) */ |
| 145 | struct metadataPartitionMap { | 145 | struct metadataPartitionMap { |
| @@ -156,14 +156,14 @@ struct metadataPartitionMap { | |||
| 156 | __le16 alignUnitSize; | 156 | __le16 alignUnitSize; |
| 157 | uint8_t flags; | 157 | uint8_t flags; |
| 158 | uint8_t reserved2[5]; | 158 | uint8_t reserved2[5]; |
| 159 | } __attribute__ ((packed)); | 159 | } __packed; |
| 160 | 160 | ||
| 161 | /* Virtual Allocation Table (UDF 1.5 2.2.10) */ | 161 | /* Virtual Allocation Table (UDF 1.5 2.2.10) */ |
| 162 | struct virtualAllocationTable15 { | 162 | struct virtualAllocationTable15 { |
| 163 | __le32 VirtualSector[0]; | 163 | __le32 VirtualSector[0]; |
| 164 | struct regid vatIdent; | 164 | struct regid vatIdent; |
| 165 | __le32 previousVATICBLoc; | 165 | __le32 previousVATICBLoc; |
| 166 | } __attribute__ ((packed)); | 166 | } __packed; |
| 167 | 167 | ||
| 168 | #define ICBTAG_FILE_TYPE_VAT15 0x00U | 168 | #define ICBTAG_FILE_TYPE_VAT15 0x00U |
| 169 | 169 | ||
| @@ -181,7 +181,7 @@ struct virtualAllocationTable20 { | |||
| 181 | __le16 reserved; | 181 | __le16 reserved; |
| 182 | uint8_t impUse[0]; | 182 | uint8_t impUse[0]; |
| 183 | __le32 vatEntry[0]; | 183 | __le32 vatEntry[0]; |
| 184 | } __attribute__ ((packed)); | 184 | } __packed; |
| 185 | 185 | ||
| 186 | #define ICBTAG_FILE_TYPE_VAT20 0xF8U | 186 | #define ICBTAG_FILE_TYPE_VAT20 0xF8U |
| 187 | 187 | ||
| @@ -189,7 +189,7 @@ struct virtualAllocationTable20 { | |||
| 189 | struct sparingEntry { | 189 | struct sparingEntry { |
| 190 | __le32 origLocation; | 190 | __le32 origLocation; |
| 191 | __le32 mappedLocation; | 191 | __le32 mappedLocation; |
| 192 | } __attribute__ ((packed)); | 192 | } __packed; |
| 193 | 193 | ||
| 194 | struct sparingTable { | 194 | struct sparingTable { |
| 195 | struct tag descTag; | 195 | struct tag descTag; |
| @@ -199,7 +199,7 @@ struct sparingTable { | |||
| 199 | __le32 sequenceNum; | 199 | __le32 sequenceNum; |
| 200 | struct sparingEntry | 200 | struct sparingEntry |
| 201 | mapEntry[0]; | 201 | mapEntry[0]; |
| 202 | } __attribute__ ((packed)); | 202 | } __packed; |
| 203 | 203 | ||
| 204 | /* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */ | 204 | /* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */ |
| 205 | #define ICBTAG_FILE_TYPE_MAIN 0xFA | 205 | #define ICBTAG_FILE_TYPE_MAIN 0xFA |
| @@ -210,7 +210,7 @@ struct sparingTable { | |||
| 210 | struct allocDescImpUse { | 210 | struct allocDescImpUse { |
| 211 | __le16 flags; | 211 | __le16 flags; |
| 212 | uint8_t impUse[4]; | 212 | uint8_t impUse[4]; |
| 213 | } __attribute__ ((packed)); | 213 | } __packed; |
| 214 | 214 | ||
| 215 | #define AD_IU_EXT_ERASED 0x0001 | 215 | #define AD_IU_EXT_ERASED 0x0001 |
| 216 | 216 | ||
| @@ -222,7 +222,7 @@ struct allocDescImpUse { | |||
| 222 | struct freeEaSpace { | 222 | struct freeEaSpace { |
| 223 | __le16 headerChecksum; | 223 | __le16 headerChecksum; |
| 224 | uint8_t freeEASpace[0]; | 224 | uint8_t freeEASpace[0]; |
| 225 | } __attribute__ ((packed)); | 225 | } __packed; |
| 226 | 226 | ||
| 227 | /* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */ | 227 | /* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */ |
| 228 | struct DVDCopyrightImpUse { | 228 | struct DVDCopyrightImpUse { |
| @@ -230,14 +230,14 @@ struct DVDCopyrightImpUse { | |||
| 230 | uint8_t CGMSInfo; | 230 | uint8_t CGMSInfo; |
| 231 | uint8_t dataType; | 231 | uint8_t dataType; |
| 232 | uint8_t protectionSystemInfo[4]; | 232 | uint8_t protectionSystemInfo[4]; |
| 233 | } __attribute__ ((packed)); | 233 | } __packed; |
| 234 | 234 | ||
| 235 | /* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */ | 235 | /* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */ |
| 236 | /* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */ | 236 | /* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */ |
| 237 | struct freeAppEASpace { | 237 | struct freeAppEASpace { |
| 238 | __le16 headerChecksum; | 238 | __le16 headerChecksum; |
| 239 | uint8_t freeEASpace[0]; | 239 | uint8_t freeEASpace[0]; |
| 240 | } __attribute__ ((packed)); | 240 | } __packed; |
| 241 | 241 | ||
| 242 | /* UDF Defined System Stream (UDF 2.50 3.3.7) */ | 242 | /* UDF Defined System Stream (UDF 2.50 3.3.7) */ |
| 243 | #define UDF_ID_UNIQUE_ID "*UDF Unique ID Mapping Data" | 243 | #define UDF_ID_UNIQUE_ID "*UDF Unique ID Mapping Data" |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 4942549e7dc8..14b4bc1f6801 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
| @@ -264,9 +264,6 @@ static void __exit exit_udf_fs(void) | |||
| 264 | destroy_inodecache(); | 264 | destroy_inodecache(); |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | module_init(init_udf_fs) | ||
| 268 | module_exit(exit_udf_fs) | ||
| 269 | |||
| 270 | static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) | 267 | static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) |
| 271 | { | 268 | { |
| 272 | struct udf_sb_info *sbi = UDF_SB(sb); | 269 | struct udf_sb_info *sbi = UDF_SB(sb); |
| @@ -1216,7 +1213,8 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) | |||
| 1216 | struct udf_inode_info *vati; | 1213 | struct udf_inode_info *vati; |
| 1217 | uint32_t pos; | 1214 | uint32_t pos; |
| 1218 | struct virtualAllocationTable20 *vat20; | 1215 | struct virtualAllocationTable20 *vat20; |
| 1219 | sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; | 1216 | sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >> |
| 1217 | sb->s_blocksize_bits; | ||
| 1220 | 1218 | ||
| 1221 | udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); | 1219 | udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); |
| 1222 | if (!sbi->s_vat_inode && | 1220 | if (!sbi->s_vat_inode && |
| @@ -1806,7 +1804,7 @@ static int udf_check_anchor_block(struct super_block *sb, sector_t block, | |||
| 1806 | 1804 | ||
| 1807 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) && | 1805 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) && |
| 1808 | udf_fixed_to_variable(block) >= | 1806 | udf_fixed_to_variable(block) >= |
| 1809 | sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits) | 1807 | i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits) |
| 1810 | return -EAGAIN; | 1808 | return -EAGAIN; |
| 1811 | 1809 | ||
| 1812 | bh = udf_read_tagged(sb, block, block, &ident); | 1810 | bh = udf_read_tagged(sb, block, block, &ident); |
| @@ -1868,7 +1866,7 @@ static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock, | |||
| 1868 | last[last_count++] = *lastblock - 152; | 1866 | last[last_count++] = *lastblock - 152; |
| 1869 | 1867 | ||
| 1870 | for (i = 0; i < last_count; i++) { | 1868 | for (i = 0; i < last_count; i++) { |
| 1871 | if (last[i] >= sb->s_bdev->bd_inode->i_size >> | 1869 | if (last[i] >= i_size_read(sb->s_bdev->bd_inode) >> |
| 1872 | sb->s_blocksize_bits) | 1870 | sb->s_blocksize_bits) |
| 1873 | continue; | 1871 | continue; |
| 1874 | ret = udf_check_anchor_block(sb, last[i], fileset); | 1872 | ret = udf_check_anchor_block(sb, last[i], fileset); |
| @@ -1957,7 +1955,7 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt, | |||
| 1957 | if (!nsr_off) { | 1955 | if (!nsr_off) { |
| 1958 | if (!silent) | 1956 | if (!silent) |
| 1959 | udf_warn(sb, "No VRS found\n"); | 1957 | udf_warn(sb, "No VRS found\n"); |
| 1960 | return 0; | 1958 | return -EINVAL; |
| 1961 | } | 1959 | } |
| 1962 | if (nsr_off == -1) | 1960 | if (nsr_off == -1) |
| 1963 | udf_debug("Failed to read sector at offset %d. " | 1961 | udf_debug("Failed to read sector at offset %d. " |
| @@ -1986,6 +1984,7 @@ static void udf_open_lvid(struct super_block *sb) | |||
| 1986 | struct buffer_head *bh = sbi->s_lvid_bh; | 1984 | struct buffer_head *bh = sbi->s_lvid_bh; |
| 1987 | struct logicalVolIntegrityDesc *lvid; | 1985 | struct logicalVolIntegrityDesc *lvid; |
| 1988 | struct logicalVolIntegrityDescImpUse *lvidiu; | 1986 | struct logicalVolIntegrityDescImpUse *lvidiu; |
| 1987 | struct timespec ts; | ||
| 1989 | 1988 | ||
| 1990 | if (!bh) | 1989 | if (!bh) |
| 1991 | return; | 1990 | return; |
| @@ -1997,8 +1996,8 @@ static void udf_open_lvid(struct super_block *sb) | |||
| 1997 | mutex_lock(&sbi->s_alloc_mutex); | 1996 | mutex_lock(&sbi->s_alloc_mutex); |
| 1998 | lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; | 1997 | lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; |
| 1999 | lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | 1998 | lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; |
| 2000 | udf_time_to_disk_stamp(&lvid->recordingDateAndTime, | 1999 | ktime_get_real_ts(&ts); |
| 2001 | CURRENT_TIME); | 2000 | udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts); |
| 2002 | lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); | 2001 | lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); |
| 2003 | 2002 | ||
| 2004 | lvid->descTag.descCRC = cpu_to_le16( | 2003 | lvid->descTag.descCRC = cpu_to_le16( |
| @@ -2019,6 +2018,7 @@ static void udf_close_lvid(struct super_block *sb) | |||
| 2019 | struct buffer_head *bh = sbi->s_lvid_bh; | 2018 | struct buffer_head *bh = sbi->s_lvid_bh; |
| 2020 | struct logicalVolIntegrityDesc *lvid; | 2019 | struct logicalVolIntegrityDesc *lvid; |
| 2021 | struct logicalVolIntegrityDescImpUse *lvidiu; | 2020 | struct logicalVolIntegrityDescImpUse *lvidiu; |
| 2021 | struct timespec ts; | ||
| 2022 | 2022 | ||
| 2023 | if (!bh) | 2023 | if (!bh) |
| 2024 | return; | 2024 | return; |
| @@ -2030,7 +2030,8 @@ static void udf_close_lvid(struct super_block *sb) | |||
| 2030 | mutex_lock(&sbi->s_alloc_mutex); | 2030 | mutex_lock(&sbi->s_alloc_mutex); |
| 2031 | lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; | 2031 | lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; |
| 2032 | lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | 2032 | lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; |
| 2033 | udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); | 2033 | ktime_get_real_ts(&ts); |
| 2034 | udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts); | ||
| 2034 | if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) | 2035 | if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) |
| 2035 | lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); | 2036 | lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); |
| 2036 | if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) | 2037 | if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) |
| @@ -2158,15 +2159,25 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
| 2158 | ret = udf_load_vrs(sb, &uopt, silent, &fileset); | 2159 | ret = udf_load_vrs(sb, &uopt, silent, &fileset); |
| 2159 | } else { | 2160 | } else { |
| 2160 | uopt.blocksize = bdev_logical_block_size(sb->s_bdev); | 2161 | uopt.blocksize = bdev_logical_block_size(sb->s_bdev); |
| 2161 | ret = udf_load_vrs(sb, &uopt, silent, &fileset); | 2162 | while (uopt.blocksize <= 4096) { |
| 2162 | if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { | ||
| 2163 | if (!silent) | ||
| 2164 | pr_notice("Rescanning with blocksize %d\n", | ||
| 2165 | UDF_DEFAULT_BLOCKSIZE); | ||
| 2166 | brelse(sbi->s_lvid_bh); | ||
| 2167 | sbi->s_lvid_bh = NULL; | ||
| 2168 | uopt.blocksize = UDF_DEFAULT_BLOCKSIZE; | ||
| 2169 | ret = udf_load_vrs(sb, &uopt, silent, &fileset); | 2163 | ret = udf_load_vrs(sb, &uopt, silent, &fileset); |
| 2164 | if (ret < 0) { | ||
| 2165 | if (!silent && ret != -EACCES) { | ||
| 2166 | pr_notice("Scanning with blocksize %d failed\n", | ||
| 2167 | uopt.blocksize); | ||
| 2168 | } | ||
| 2169 | brelse(sbi->s_lvid_bh); | ||
| 2170 | sbi->s_lvid_bh = NULL; | ||
| 2171 | /* | ||
| 2172 | * EACCES is special - we want to propagate to | ||
| 2173 | * upper layers that we cannot handle RW mount. | ||
| 2174 | */ | ||
| 2175 | if (ret == -EACCES) | ||
| 2176 | break; | ||
| 2177 | } else | ||
| 2178 | break; | ||
| 2179 | |||
| 2180 | uopt.blocksize <<= 1; | ||
| 2170 | } | 2181 | } |
| 2171 | } | 2182 | } |
| 2172 | if (ret < 0) { | 2183 | if (ret < 0) { |
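
Note: instead of a single fallback to UDF_DEFAULT_BLOCKSIZE, udf_fill_super() now probes the volume recognition sequence at every power-of-two block size from the device's logical block size up to 4096, stopping early on success or on -EACCES, which has to propagate so the caller can fall back to a read-only mount. The loop, condensed into a standalone sketch with a hypothetical probe callback standing in for udf_load_vrs():

#include <errno.h>

/* Probe a volume at successive power-of-two block sizes up to 4096. */
static int probe_blocksizes(int (*probe)(unsigned int bs), unsigned int bs)
{
	int ret = -EINVAL;

	while (bs <= 4096) {
		ret = probe(bs);
		if (ret >= 0)
			break;		/* volume recognized at this size */
		if (ret == -EACCES)
			break;		/* must reach caller: RW mount not possible */
		bs <<= 1;		/* try the next larger block size */
	}
	return ret;
}
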
| @@ -2497,3 +2508,9 @@ static unsigned int udf_count_free(struct super_block *sb) | |||
| 2497 | 2508 | ||
| 2498 | return accum; | 2509 | return accum; |
| 2499 | } | 2510 | } |
| 2511 | |||
| 2512 | MODULE_AUTHOR("Ben Fennema"); | ||
| 2513 | MODULE_DESCRIPTION("Universal Disk Format Filesystem"); | ||
| 2514 | MODULE_LICENSE("GPL"); | ||
| 2515 | module_init(init_udf_fs) | ||
| 2516 | module_exit(exit_udf_fs) | ||
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index 8d619773056b..f7dfef53f739 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c | |||
| @@ -152,9 +152,39 @@ out_unmap: | |||
| 152 | return err; | 152 | return err; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | static int udf_symlink_getattr(struct vfsmount *mnt, struct dentry *dentry, | ||
| 156 | struct kstat *stat) | ||
| 157 | { | ||
| 158 | struct inode *inode = d_backing_inode(dentry); | ||
| 159 | struct page *page; | ||
| 160 | |||
| 161 | generic_fillattr(inode, stat); | ||
| 162 | page = read_mapping_page(inode->i_mapping, 0, NULL); | ||
| 163 | if (IS_ERR(page)) | ||
| 164 | return PTR_ERR(page); | ||
| 165 | /* | ||
| 166 | * UDF uses non-trivial encoding of symlinks so i_size does not match | ||
| 167 | * number of characters reported by readlink(2) which apparently some | ||
| 168 | * applications expect. Also POSIX says that "The value returned in the | ||
| 169 | * st_size field shall be the length of the contents of the symbolic | ||
| 170 | * link, and shall not count a trailing null if one is present." So | ||
| 171 | * let's report the length of string returned by readlink(2) for | ||
| 172 | * st_size. | ||
| 173 | */ | ||
| 174 | stat->size = strlen(page_address(page)); | ||
| 175 | put_page(page); | ||
| 176 | |||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 155 | /* | 180 | /* |
| 156 | * symlinks can't do much... | 181 | * symlinks can't do much... |
| 157 | */ | 182 | */ |
| 158 | const struct address_space_operations udf_symlink_aops = { | 183 | const struct address_space_operations udf_symlink_aops = { |
| 159 | .readpage = udf_symlink_filler, | 184 | .readpage = udf_symlink_filler, |
| 160 | }; | 185 | }; |
| 186 | |||
| 187 | const struct inode_operations udf_symlink_inode_operations = { | ||
| 188 | .get_link = page_get_link, | ||
| 189 | .getattr = udf_symlink_getattr, | ||
| 190 | }; | ||
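
The new ->getattr above makes stat() on a UDF symlink report the length of the decoded target rather than the on-disk i_size, matching what readlink(2) returns. A hedged user-space check of that property (only standard POSIX calls, nothing UDF-specific):

/*
 * Hedged user-space check (not kernel code): after the change above,
 * lstat() on a UDF symlink should report st_size equal to the length of
 * the string returned by readlink(2), as POSIX requires.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;
	char buf[4096];
	ssize_t n;

	if (argc != 2)
		return 1;
	if (lstat(argv[1], &st) != 0)
		return 1;
	n = readlink(argv[1], buf, sizeof(buf) - 1);
	if (n < 0)
		return 1;
	buf[n] = '\0';

	printf("st_size=%lld readlink_len=%zd match=%s\n",
	       (long long)st.st_size, n,
	       (size_t)st.st_size == strlen(buf) ? "yes" : "no");
	return 0;
}
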
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 263829ef1873..63b034984378 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include "udfend.h" | 15 | #include "udfend.h" |
| 16 | #include "udf_i.h" | 16 | #include "udf_i.h" |
| 17 | 17 | ||
| 18 | #define UDF_PREALLOCATE | ||
| 19 | #define UDF_DEFAULT_PREALLOC_BLOCKS 8 | 18 | #define UDF_DEFAULT_PREALLOC_BLOCKS 8 |
| 20 | 19 | ||
| 21 | extern __printf(3, 4) void _udf_err(struct super_block *sb, | 20 | extern __printf(3, 4) void _udf_err(struct super_block *sb, |
| @@ -85,6 +84,7 @@ extern const struct inode_operations udf_dir_inode_operations; | |||
| 85 | extern const struct file_operations udf_dir_operations; | 84 | extern const struct file_operations udf_dir_operations; |
| 86 | extern const struct inode_operations udf_file_inode_operations; | 85 | extern const struct inode_operations udf_file_inode_operations; |
| 87 | extern const struct file_operations udf_file_operations; | 86 | extern const struct file_operations udf_file_operations; |
| 87 | extern const struct inode_operations udf_symlink_inode_operations; | ||
| 88 | extern const struct address_space_operations udf_aops; | 88 | extern const struct address_space_operations udf_aops; |
| 89 | extern const struct address_space_operations udf_adinicb_aops; | 89 | extern const struct address_space_operations udf_adinicb_aops; |
| 90 | extern const struct address_space_operations udf_symlink_aops; | 90 | extern const struct address_space_operations udf_symlink_aops; |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index d96e2f30084b..43953e03c356 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
| @@ -63,6 +63,7 @@ struct userfaultfd_wait_queue { | |||
| 63 | struct uffd_msg msg; | 63 | struct uffd_msg msg; |
| 64 | wait_queue_t wq; | 64 | wait_queue_t wq; |
| 65 | struct userfaultfd_ctx *ctx; | 65 | struct userfaultfd_ctx *ctx; |
| 66 | bool waken; | ||
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | struct userfaultfd_wake_range { | 69 | struct userfaultfd_wake_range { |
| @@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, | |||
| 86 | if (len && (start > uwq->msg.arg.pagefault.address || | 87 | if (len && (start > uwq->msg.arg.pagefault.address || |
| 87 | start + len <= uwq->msg.arg.pagefault.address)) | 88 | start + len <= uwq->msg.arg.pagefault.address)) |
| 88 | goto out; | 89 | goto out; |
| 90 | WRITE_ONCE(uwq->waken, true); | ||
| 91 | /* | ||
| 92 | * The implicit smp_mb__before_spinlock in try_to_wake_up() | ||
| 93 | * renders uwq->waken visible to other CPUs before the task is | ||
| 94 | * woken. | ||
| 95 | */ | ||
| 89 | ret = wake_up_state(wq->private, mode); | 96 | ret = wake_up_state(wq->private, mode); |
| 90 | if (ret) | 97 | if (ret) |
| 91 | /* | 98 | /* |
| @@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
| 264 | struct userfaultfd_wait_queue uwq; | 271 | struct userfaultfd_wait_queue uwq; |
| 265 | int ret; | 272 | int ret; |
| 266 | bool must_wait, return_to_userland; | 273 | bool must_wait, return_to_userland; |
| 274 | long blocking_state; | ||
| 267 | 275 | ||
| 268 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); | 276 | BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); |
| 269 | 277 | ||
| @@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
| 334 | uwq.wq.private = current; | 342 | uwq.wq.private = current; |
| 335 | uwq.msg = userfault_msg(vmf->address, vmf->flags, reason); | 343 | uwq.msg = userfault_msg(vmf->address, vmf->flags, reason); |
| 336 | uwq.ctx = ctx; | 344 | uwq.ctx = ctx; |
| 345 | uwq.waken = false; | ||
| 337 | 346 | ||
| 338 | return_to_userland = | 347 | return_to_userland = |
| 339 | (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == | 348 | (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == |
| 340 | (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); | 349 | (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); |
| 350 | blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : | ||
| 351 | TASK_KILLABLE; | ||
| 341 | 352 | ||
| 342 | spin_lock(&ctx->fault_pending_wqh.lock); | 353 | spin_lock(&ctx->fault_pending_wqh.lock); |
| 343 | /* | 354 | /* |
| @@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
| 350 | * following the spin_unlock to happen before the list_add in | 361 | * following the spin_unlock to happen before the list_add in |
| 351 | * __add_wait_queue. | 362 | * __add_wait_queue. |
| 352 | */ | 363 | */ |
| 353 | set_current_state(return_to_userland ? TASK_INTERRUPTIBLE : | 364 | set_current_state(blocking_state); |
| 354 | TASK_KILLABLE); | ||
| 355 | spin_unlock(&ctx->fault_pending_wqh.lock); | 365 | spin_unlock(&ctx->fault_pending_wqh.lock); |
| 356 | 366 | ||
| 357 | must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, | 367 | must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, |
| @@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
| 364 | wake_up_poll(&ctx->fd_wqh, POLLIN); | 374 | wake_up_poll(&ctx->fd_wqh, POLLIN); |
| 365 | schedule(); | 375 | schedule(); |
| 366 | ret |= VM_FAULT_MAJOR; | 376 | ret |= VM_FAULT_MAJOR; |
| 377 | |||
| 378 | /* | ||
| 379 | * False wakeups can originate even from rwsem before | ||
| 380 | * up_read(); however, userfaults will wait either for a | ||
| 381 | * targeted wakeup on the specific uwq waitqueue from | ||
| 382 | * wake_userfault() or for signals or for uffd | ||
| 383 | * release. | ||
| 384 | */ | ||
| 385 | while (!READ_ONCE(uwq.waken)) { | ||
| 386 | /* | ||
| 387 | * This needs the full smp_store_mb() | ||
| 388 | * guarantee as the state write must be | ||
| 389 | * visible to other CPUs before reading | ||
| 390 | * uwq.waken from other CPUs. | ||
| 391 | */ | ||
| 392 | set_current_state(blocking_state); | ||
| 393 | if (READ_ONCE(uwq.waken) || | ||
| 394 | READ_ONCE(ctx->released) || | ||
| 395 | (return_to_userland ? signal_pending(current) : | ||
| 396 | fatal_signal_pending(current))) | ||
| 397 | break; | ||
| 398 | schedule(); | ||
| 399 | } | ||
| 367 | } | 400 | } |
| 368 | 401 | ||
| 369 | __set_current_state(TASK_RUNNING); | 402 | __set_current_state(TASK_RUNNING); |
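
The hunk above adds a per-waiter waken flag and re-checks it in a loop around schedule(), so spurious wakeups (for example from the rwsem before up_read()) no longer return the fault to userland before an explicit wake, a signal, or uffd release. The kernel version relies on set_current_state()/READ_ONCE() ordering; the sketch below is only a user-space analogue of the same "loop until the flag is really set" pattern, expressed with a pthread condition variable:

/*
 * Hedged analogue in user space (POSIX threads, not the kernel
 * primitives): re-check the wake condition in a loop, as the hunk above
 * does around schedule(), so spurious wakeups are ignored.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool waken;			/* analogue of uwq.waken */

static void *waker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	waken = true;			/* set the flag before waking */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	pthread_mutex_lock(&lock);
	while (!waken)			/* ignore spurious wakeups */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("woken for real\n");
	return 0;
}
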
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index e5ebc3770460..33db69be4832 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include "xfs_rmap_btree.h" | 39 | #include "xfs_rmap_btree.h" |
| 40 | #include "xfs_btree.h" | 40 | #include "xfs_btree.h" |
| 41 | #include "xfs_refcount_btree.h" | 41 | #include "xfs_refcount_btree.h" |
| 42 | #include "xfs_ialloc_btree.h" | ||
| 42 | 43 | ||
| 43 | /* | 44 | /* |
| 44 | * Per-AG Block Reservations | 45 | * Per-AG Block Reservations |
| @@ -200,22 +201,30 @@ __xfs_ag_resv_init( | |||
| 200 | struct xfs_mount *mp = pag->pag_mount; | 201 | struct xfs_mount *mp = pag->pag_mount; |
| 201 | struct xfs_ag_resv *resv; | 202 | struct xfs_ag_resv *resv; |
| 202 | int error; | 203 | int error; |
| 204 | xfs_extlen_t reserved; | ||
| 203 | 205 | ||
| 204 | resv = xfs_perag_resv(pag, type); | ||
| 205 | if (used > ask) | 206 | if (used > ask) |
| 206 | ask = used; | 207 | ask = used; |
| 207 | resv->ar_asked = ask; | 208 | reserved = ask - used; |
| 208 | resv->ar_reserved = resv->ar_orig_reserved = ask - used; | ||
| 209 | mp->m_ag_max_usable -= ask; | ||
| 210 | 209 | ||
| 211 | trace_xfs_ag_resv_init(pag, type, ask); | 210 | error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true); |
| 212 | 211 | if (error) { | |
| 213 | error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true); | ||
| 214 | if (error) | ||
| 215 | trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, | 212 | trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, |
| 216 | error, _RET_IP_); | 213 | error, _RET_IP_); |
| 214 | xfs_warn(mp, | ||
| 215 | "Per-AG reservation for AG %u failed. Filesystem may run out of space.", | ||
| 216 | pag->pag_agno); | ||
| 217 | return error; | ||
| 218 | } | ||
| 217 | 219 | ||
| 218 | return error; | 220 | mp->m_ag_max_usable -= ask; |
| 221 | |||
| 222 | resv = xfs_perag_resv(pag, type); | ||
| 223 | resv->ar_asked = ask; | ||
| 224 | resv->ar_reserved = resv->ar_orig_reserved = reserved; | ||
| 225 | |||
| 226 | trace_xfs_ag_resv_init(pag, type, ask); | ||
| 227 | return 0; | ||
| 219 | } | 228 | } |
| 220 | 229 | ||
| 221 | /* Create a per-AG block reservation. */ | 230 | /* Create a per-AG block reservation. */ |
| @@ -223,6 +232,8 @@ int | |||
| 223 | xfs_ag_resv_init( | 232 | xfs_ag_resv_init( |
| 224 | struct xfs_perag *pag) | 233 | struct xfs_perag *pag) |
| 225 | { | 234 | { |
| 235 | struct xfs_mount *mp = pag->pag_mount; | ||
| 236 | xfs_agnumber_t agno = pag->pag_agno; | ||
| 226 | xfs_extlen_t ask; | 237 | xfs_extlen_t ask; |
| 227 | xfs_extlen_t used; | 238 | xfs_extlen_t used; |
| 228 | int error = 0; | 239 | int error = 0; |
| @@ -231,23 +242,45 @@ xfs_ag_resv_init( | |||
| 231 | if (pag->pag_meta_resv.ar_asked == 0) { | 242 | if (pag->pag_meta_resv.ar_asked == 0) { |
| 232 | ask = used = 0; | 243 | ask = used = 0; |
| 233 | 244 | ||
| 234 | error = xfs_refcountbt_calc_reserves(pag->pag_mount, | 245 | error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used); |
| 235 | pag->pag_agno, &ask, &used); | ||
| 236 | if (error) | 246 | if (error) |
| 237 | goto out; | 247 | goto out; |
| 238 | 248 | ||
| 239 | error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, | 249 | error = xfs_finobt_calc_reserves(mp, agno, &ask, &used); |
| 240 | ask, used); | ||
| 241 | if (error) | 250 | if (error) |
| 242 | goto out; | 251 | goto out; |
| 252 | |||
| 253 | error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, | ||
| 254 | ask, used); | ||
| 255 | if (error) { | ||
| 256 | /* | ||
| 257 | * Because we didn't have per-AG reservations when the | ||
| 258 | * finobt feature was added we might not be able to | ||
| 259 | * reserve all needed blocks. Warn and fall back to the | ||
| 260 | * old and potentially buggy code in that case, but | ||
| 261 | * ensure we do have the reservation for the refcountbt. | ||
| 262 | */ | ||
| 263 | ask = used = 0; | ||
| 264 | |||
| 265 | mp->m_inotbt_nores = true; | ||
| 266 | |||
| 267 | error = xfs_refcountbt_calc_reserves(mp, agno, &ask, | ||
| 268 | &used); | ||
| 269 | if (error) | ||
| 270 | goto out; | ||
| 271 | |||
| 272 | error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, | ||
| 273 | ask, used); | ||
| 274 | if (error) | ||
| 275 | goto out; | ||
| 276 | } | ||
| 243 | } | 277 | } |
| 244 | 278 | ||
| 245 | /* Create the AGFL metadata reservation */ | 279 | /* Create the AGFL metadata reservation */ |
| 246 | if (pag->pag_agfl_resv.ar_asked == 0) { | 280 | if (pag->pag_agfl_resv.ar_asked == 0) { |
| 247 | ask = used = 0; | 281 | ask = used = 0; |
| 248 | 282 | ||
| 249 | error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno, | 283 | error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used); |
| 250 | &ask, &used); | ||
| 251 | if (error) | 284 | if (error) |
| 252 | goto out; | 285 | goto out; |
| 253 | 286 | ||
| @@ -256,6 +289,16 @@ xfs_ag_resv_init( | |||
| 256 | goto out; | 289 | goto out; |
| 257 | } | 290 | } |
| 258 | 291 | ||
| 292 | #ifdef DEBUG | ||
| 293 | /* need to read in the AGF for the ASSERT below to work */ | ||
| 294 | error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0); | ||
| 295 | if (error) | ||
| 296 | return error; | ||
| 297 | |||
| 298 | ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + | ||
| 299 | xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <= | ||
| 300 | pag->pagf_freeblks + pag->pagf_flcount); | ||
| 301 | #endif | ||
| 259 | out: | 302 | out: |
| 260 | return error; | 303 | return error; |
| 261 | } | 304 | } |
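
Two things change above: __xfs_ag_resv_init() now takes blocks out of fdblocks before touching the per-AG reservation fields (so a failed reservation leaves no state behind), and xfs_ag_resv_init() retries with a refcountbt-only ask when the combined refcountbt + finobt reservation cannot be satisfied, setting m_inotbt_nores. A stand-alone sketch of that fallback shape, with a hypothetical reserve_blocks() in place of the xfs_mod_fdblocks()-backed reservation:

/*
 * Hedged sketch (stand-alone C, not the XFS code): try the full ask
 * first, fall back to the smaller refcountbt-only ask if it fails, and
 * only commit state once the global reservation has succeeded.
 */
#include <stdio.h>
#include <stdbool.h>

static long free_blocks = 100;

/* take blocks out of the global pool, all or nothing */
static bool reserve_blocks(long ask)
{
	if (ask > free_blocks)
		return false;
	free_blocks -= ask;
	return true;
}

static bool init_metadata_resv(long refcountbt_ask, long finobt_ask,
			       bool *finobt_nores)
{
	/* first try to cover both btrees */
	if (reserve_blocks(refcountbt_ask + finobt_ask))
		return true;

	/* fall back: keep the refcountbt reservation, skip the finobt one */
	*finobt_nores = true;
	return reserve_blocks(refcountbt_ask);
}

int main(void)
{
	bool nores = false;

	if (!init_metadata_resv(60, 80, &nores))
		printf("reservation failed entirely\n");
	else
		printf("reserved, finobt fallback=%s, free left=%ld\n",
		       nores ? "yes" : "no", free_blocks);
	return 0;
}
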
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 5050056a0b06..9f06a211e157 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c | |||
| @@ -95,10 +95,7 @@ unsigned int | |||
| 95 | xfs_alloc_set_aside( | 95 | xfs_alloc_set_aside( |
| 96 | struct xfs_mount *mp) | 96 | struct xfs_mount *mp) |
| 97 | { | 97 | { |
| 98 | unsigned int blocks; | 98 | return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4); |
| 99 | |||
| 100 | blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE); | ||
| 101 | return blocks; | ||
| 102 | } | 99 | } |
| 103 | 100 | ||
| 104 | /* | 101 | /* |
| @@ -365,36 +362,12 @@ xfs_alloc_fix_len( | |||
| 365 | return; | 362 | return; |
| 366 | ASSERT(rlen >= args->minlen && rlen <= args->maxlen); | 363 | ASSERT(rlen >= args->minlen && rlen <= args->maxlen); |
| 367 | ASSERT(rlen % args->prod == args->mod); | 364 | ASSERT(rlen % args->prod == args->mod); |
| 365 | ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >= | ||
| 366 | rlen + args->minleft); | ||
| 368 | args->len = rlen; | 367 | args->len = rlen; |
| 369 | } | 368 | } |
| 370 | 369 | ||
| 371 | /* | 370 | /* |
| 372 | * Fix up length if there is too little space left in the a.g. | ||
| 373 | * Return 1 if ok, 0 if too little, should give up. | ||
| 374 | */ | ||
| 375 | STATIC int | ||
| 376 | xfs_alloc_fix_minleft( | ||
| 377 | xfs_alloc_arg_t *args) /* allocation argument structure */ | ||
| 378 | { | ||
| 379 | xfs_agf_t *agf; /* a.g. freelist header */ | ||
| 380 | int diff; /* free space difference */ | ||
| 381 | |||
| 382 | if (args->minleft == 0) | ||
| 383 | return 1; | ||
| 384 | agf = XFS_BUF_TO_AGF(args->agbp); | ||
| 385 | diff = be32_to_cpu(agf->agf_freeblks) | ||
| 386 | - args->len - args->minleft; | ||
| 387 | if (diff >= 0) | ||
| 388 | return 1; | ||
| 389 | args->len += diff; /* shrink the allocated space */ | ||
| 390 | /* casts to (int) catch length underflows */ | ||
| 391 | if ((int)args->len >= (int)args->minlen) | ||
| 392 | return 1; | ||
| 393 | args->agbno = NULLAGBLOCK; | ||
| 394 | return 0; | ||
| 395 | } | ||
| 396 | |||
| 397 | /* | ||
| 398 | * Update the two btrees, logically removing from freespace the extent | 371 | * Update the two btrees, logically removing from freespace the extent |
| 399 | * starting at rbno, rlen blocks. The extent is contained within the | 372 | * starting at rbno, rlen blocks. The extent is contained within the |
| 400 | * actual (current) free extent fbno for flen blocks. | 373 | * actual (current) free extent fbno for flen blocks. |
| @@ -689,8 +662,6 @@ xfs_alloc_ag_vextent( | |||
| 689 | xfs_alloc_arg_t *args) /* argument structure for allocation */ | 662 | xfs_alloc_arg_t *args) /* argument structure for allocation */ |
| 690 | { | 663 | { |
| 691 | int error=0; | 664 | int error=0; |
| 692 | xfs_extlen_t reservation; | ||
| 693 | xfs_extlen_t oldmax; | ||
| 694 | 665 | ||
| 695 | ASSERT(args->minlen > 0); | 666 | ASSERT(args->minlen > 0); |
| 696 | ASSERT(args->maxlen > 0); | 667 | ASSERT(args->maxlen > 0); |
| @@ -699,20 +670,6 @@ xfs_alloc_ag_vextent( | |||
| 699 | ASSERT(args->alignment > 0); | 670 | ASSERT(args->alignment > 0); |
| 700 | 671 | ||
| 701 | /* | 672 | /* |
| 702 | * Clamp maxlen to the amount of free space minus any reservations | ||
| 703 | * that have been made. | ||
| 704 | */ | ||
| 705 | oldmax = args->maxlen; | ||
| 706 | reservation = xfs_ag_resv_needed(args->pag, args->resv); | ||
| 707 | if (args->maxlen > args->pag->pagf_freeblks - reservation) | ||
| 708 | args->maxlen = args->pag->pagf_freeblks - reservation; | ||
| 709 | if (args->maxlen == 0) { | ||
| 710 | args->agbno = NULLAGBLOCK; | ||
| 711 | args->maxlen = oldmax; | ||
| 712 | return 0; | ||
| 713 | } | ||
| 714 | |||
| 715 | /* | ||
| 716 | * Branch to correct routine based on the type. | 673 | * Branch to correct routine based on the type. |
| 717 | */ | 674 | */ |
| 718 | args->wasfromfl = 0; | 675 | args->wasfromfl = 0; |
| @@ -731,8 +688,6 @@ xfs_alloc_ag_vextent( | |||
| 731 | /* NOTREACHED */ | 688 | /* NOTREACHED */ |
| 732 | } | 689 | } |
| 733 | 690 | ||
| 734 | args->maxlen = oldmax; | ||
| 735 | |||
| 736 | if (error || args->agbno == NULLAGBLOCK) | 691 | if (error || args->agbno == NULLAGBLOCK) |
| 737 | return error; | 692 | return error; |
| 738 | 693 | ||
| @@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact( | |||
| 841 | args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen) | 796 | args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen) |
| 842 | - args->agbno; | 797 | - args->agbno; |
| 843 | xfs_alloc_fix_len(args); | 798 | xfs_alloc_fix_len(args); |
| 844 | if (!xfs_alloc_fix_minleft(args)) | ||
| 845 | goto not_found; | ||
| 846 | |||
| 847 | ASSERT(args->agbno + args->len <= tend); | 799 | ASSERT(args->agbno + args->len <= tend); |
| 848 | 800 | ||
| 849 | /* | 801 | /* |
| @@ -1149,12 +1101,7 @@ restart: | |||
| 1149 | XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); | 1101 | XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); |
| 1150 | ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); | 1102 | ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
| 1151 | args->len = blen; | 1103 | args->len = blen; |
| 1152 | if (!xfs_alloc_fix_minleft(args)) { | 1104 | |
| 1153 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); | ||
| 1154 | trace_xfs_alloc_near_nominleft(args); | ||
| 1155 | return 0; | ||
| 1156 | } | ||
| 1157 | blen = args->len; | ||
| 1158 | /* | 1105 | /* |
| 1159 | * We are allocating starting at bnew for blen blocks. | 1106 | * We are allocating starting at bnew for blen blocks. |
| 1160 | */ | 1107 | */ |
| @@ -1346,12 +1293,6 @@ restart: | |||
| 1346 | */ | 1293 | */ |
| 1347 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); | 1294 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); |
| 1348 | xfs_alloc_fix_len(args); | 1295 | xfs_alloc_fix_len(args); |
| 1349 | if (!xfs_alloc_fix_minleft(args)) { | ||
| 1350 | trace_xfs_alloc_near_nominleft(args); | ||
| 1351 | xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); | ||
| 1352 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); | ||
| 1353 | return 0; | ||
| 1354 | } | ||
| 1355 | rlen = args->len; | 1296 | rlen = args->len; |
| 1356 | (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, | 1297 | (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, |
| 1357 | args->datatype, ltbnoa, ltlena, &ltnew); | 1298 | args->datatype, ltbnoa, ltlena, &ltnew); |
| @@ -1553,8 +1494,6 @@ restart: | |||
| 1553 | } | 1494 | } |
| 1554 | xfs_alloc_fix_len(args); | 1495 | xfs_alloc_fix_len(args); |
| 1555 | 1496 | ||
| 1556 | if (!xfs_alloc_fix_minleft(args)) | ||
| 1557 | goto out_nominleft; | ||
| 1558 | rlen = args->len; | 1497 | rlen = args->len; |
| 1559 | XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0); | 1498 | XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0); |
| 1560 | /* | 1499 | /* |
| @@ -2056,7 +1995,7 @@ xfs_alloc_space_available( | |||
| 2056 | int flags) | 1995 | int flags) |
| 2057 | { | 1996 | { |
| 2058 | struct xfs_perag *pag = args->pag; | 1997 | struct xfs_perag *pag = args->pag; |
| 2059 | xfs_extlen_t longest; | 1998 | xfs_extlen_t alloc_len, longest; |
| 2060 | xfs_extlen_t reservation; /* blocks that are still reserved */ | 1999 | xfs_extlen_t reservation; /* blocks that are still reserved */ |
| 2061 | int available; | 2000 | int available; |
| 2062 | 2001 | ||
| @@ -2066,17 +2005,28 @@ xfs_alloc_space_available( | |||
| 2066 | reservation = xfs_ag_resv_needed(pag, args->resv); | 2005 | reservation = xfs_ag_resv_needed(pag, args->resv); |
| 2067 | 2006 | ||
| 2068 | /* do we have enough contiguous free space for the allocation? */ | 2007 | /* do we have enough contiguous free space for the allocation? */ |
| 2008 | alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop; | ||
| 2069 | longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free, | 2009 | longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free, |
| 2070 | reservation); | 2010 | reservation); |
| 2071 | if ((args->minlen + args->alignment + args->minalignslop - 1) > longest) | 2011 | if (longest < alloc_len) |
| 2072 | return false; | 2012 | return false; |
| 2073 | 2013 | ||
| 2074 | /* do we have enough free space remaining for the allocation? */ | 2014 | /* do we have enough free space remaining for the allocation? */ |
| 2075 | available = (int)(pag->pagf_freeblks + pag->pagf_flcount - | 2015 | available = (int)(pag->pagf_freeblks + pag->pagf_flcount - |
| 2076 | reservation - min_free - args->total); | 2016 | reservation - min_free - args->minleft); |
| 2077 | if (available < (int)args->minleft || available <= 0) | 2017 | if (available < (int)max(args->total, alloc_len)) |
| 2078 | return false; | 2018 | return false; |
| 2079 | 2019 | ||
| 2020 | /* | ||
| 2021 | * Clamp maxlen to the amount of free space available for the actual | ||
| 2022 | * extent allocation. | ||
| 2023 | */ | ||
| 2024 | if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) { | ||
| 2025 | args->maxlen = available; | ||
| 2026 | ASSERT(args->maxlen > 0); | ||
| 2027 | ASSERT(args->maxlen >= args->minlen); | ||
| 2028 | } | ||
| 2029 | |||
| 2080 | return true; | 2030 | return true; |
| 2081 | } | 2031 | } |
| 2082 | 2032 | ||
| @@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist( | |||
| 2122 | } | 2072 | } |
| 2123 | 2073 | ||
| 2124 | need = xfs_alloc_min_freelist(mp, pag); | 2074 | need = xfs_alloc_min_freelist(mp, pag); |
| 2125 | if (!xfs_alloc_space_available(args, need, flags)) | 2075 | if (!xfs_alloc_space_available(args, need, flags | |
| 2076 | XFS_ALLOC_FLAG_CHECK)) | ||
| 2126 | goto out_agbp_relse; | 2077 | goto out_agbp_relse; |
| 2127 | 2078 | ||
| 2128 | /* | 2079 | /* |
| @@ -2638,12 +2589,10 @@ xfs_alloc_vextent( | |||
| 2638 | xfs_agblock_t agsize; /* allocation group size */ | 2589 | xfs_agblock_t agsize; /* allocation group size */ |
| 2639 | int error; | 2590 | int error; |
| 2640 | int flags; /* XFS_ALLOC_FLAG_... locking flags */ | 2591 | int flags; /* XFS_ALLOC_FLAG_... locking flags */ |
| 2641 | xfs_extlen_t minleft;/* minimum left value, temp copy */ | ||
| 2642 | xfs_mount_t *mp; /* mount structure pointer */ | 2592 | xfs_mount_t *mp; /* mount structure pointer */ |
| 2643 | xfs_agnumber_t sagno; /* starting allocation group number */ | 2593 | xfs_agnumber_t sagno; /* starting allocation group number */ |
| 2644 | xfs_alloctype_t type; /* input allocation type */ | 2594 | xfs_alloctype_t type; /* input allocation type */ |
| 2645 | int bump_rotor = 0; | 2595 | int bump_rotor = 0; |
| 2646 | int no_min = 0; | ||
| 2647 | xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */ | 2596 | xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */ |
| 2648 | 2597 | ||
| 2649 | mp = args->mp; | 2598 | mp = args->mp; |
| @@ -2672,7 +2621,6 @@ xfs_alloc_vextent( | |||
| 2672 | trace_xfs_alloc_vextent_badargs(args); | 2621 | trace_xfs_alloc_vextent_badargs(args); |
| 2673 | return 0; | 2622 | return 0; |
| 2674 | } | 2623 | } |
| 2675 | minleft = args->minleft; | ||
| 2676 | 2624 | ||
| 2677 | switch (type) { | 2625 | switch (type) { |
| 2678 | case XFS_ALLOCTYPE_THIS_AG: | 2626 | case XFS_ALLOCTYPE_THIS_AG: |
| @@ -2683,9 +2631,7 @@ xfs_alloc_vextent( | |||
| 2683 | */ | 2631 | */ |
| 2684 | args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); | 2632 | args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); |
| 2685 | args->pag = xfs_perag_get(mp, args->agno); | 2633 | args->pag = xfs_perag_get(mp, args->agno); |
| 2686 | args->minleft = 0; | ||
| 2687 | error = xfs_alloc_fix_freelist(args, 0); | 2634 | error = xfs_alloc_fix_freelist(args, 0); |
| 2688 | args->minleft = minleft; | ||
| 2689 | if (error) { | 2635 | if (error) { |
| 2690 | trace_xfs_alloc_vextent_nofix(args); | 2636 | trace_xfs_alloc_vextent_nofix(args); |
| 2691 | goto error0; | 2637 | goto error0; |
| @@ -2750,9 +2696,7 @@ xfs_alloc_vextent( | |||
| 2750 | */ | 2696 | */ |
| 2751 | for (;;) { | 2697 | for (;;) { |
| 2752 | args->pag = xfs_perag_get(mp, args->agno); | 2698 | args->pag = xfs_perag_get(mp, args->agno); |
| 2753 | if (no_min) args->minleft = 0; | ||
| 2754 | error = xfs_alloc_fix_freelist(args, flags); | 2699 | error = xfs_alloc_fix_freelist(args, flags); |
| 2755 | args->minleft = minleft; | ||
| 2756 | if (error) { | 2700 | if (error) { |
| 2757 | trace_xfs_alloc_vextent_nofix(args); | 2701 | trace_xfs_alloc_vextent_nofix(args); |
| 2758 | goto error0; | 2702 | goto error0; |
| @@ -2792,20 +2736,17 @@ xfs_alloc_vextent( | |||
| 2792 | * or switch to non-trylock mode. | 2736 | * or switch to non-trylock mode. |
| 2793 | */ | 2737 | */ |
| 2794 | if (args->agno == sagno) { | 2738 | if (args->agno == sagno) { |
| 2795 | if (no_min == 1) { | 2739 | if (flags == 0) { |
| 2796 | args->agbno = NULLAGBLOCK; | 2740 | args->agbno = NULLAGBLOCK; |
| 2797 | trace_xfs_alloc_vextent_allfailed(args); | 2741 | trace_xfs_alloc_vextent_allfailed(args); |
| 2798 | break; | 2742 | break; |
| 2799 | } | 2743 | } |
| 2800 | if (flags == 0) { | 2744 | |
| 2801 | no_min = 1; | 2745 | flags = 0; |
| 2802 | } else { | 2746 | if (type == XFS_ALLOCTYPE_START_BNO) { |
| 2803 | flags = 0; | 2747 | args->agbno = XFS_FSB_TO_AGBNO(mp, |
| 2804 | if (type == XFS_ALLOCTYPE_START_BNO) { | 2748 | args->fsbno); |
| 2805 | args->agbno = XFS_FSB_TO_AGBNO(mp, | 2749 | args->type = XFS_ALLOCTYPE_NEAR_BNO; |
| 2806 | args->fsbno); | ||
| 2807 | args->type = XFS_ALLOCTYPE_NEAR_BNO; | ||
| 2808 | } | ||
| 2809 | } | 2750 | } |
| 2810 | } | 2751 | } |
| 2811 | xfs_perag_put(args->pag); | 2752 | xfs_perag_put(args->pag); |
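
With xfs_alloc_fix_minleft() gone, xfs_alloc_space_available() is now the single place that enforces minleft: it checks the longest free extent against minlen plus alignment slop, checks the remaining free space after minleft, and clamps maxlen up front instead of shrinking the allocation afterwards. A simplified sketch of that check (min_free folded into the reservation argument; all names here are illustrative, not the XFS implementation):

/*
 * Hedged sketch of the restructured space check above: verify the
 * longest free extent can hold minlen plus alignment slop, verify enough
 * blocks remain after minleft, then clamp maxlen unless only probing.
 */
#include <stdio.h>
#include <stdbool.h>

struct alloc_args {
	long minlen, maxlen, minleft, total;
	long alignment, minalignslop;
};

static bool space_available(struct alloc_args *a, long longest,
			    long freeblks, long reservation, bool check_only)
{
	long alloc_len = a->minlen + (a->alignment - 1) + a->minalignslop;
	long available;

	if (longest < alloc_len)
		return false;

	available = freeblks - reservation - a->minleft;
	if (available < (a->total > alloc_len ? a->total : alloc_len))
		return false;

	/* clamp maxlen to what is really usable, unless only probing */
	if (available < a->maxlen && !check_only)
		a->maxlen = available;
	return true;
}

int main(void)
{
	struct alloc_args a = { .minlen = 4, .maxlen = 64, .minleft = 8,
				.total = 4, .alignment = 1 };

	if (space_available(&a, 32, 50, 10, false))
		printf("ok, maxlen clamped to %ld\n", a.maxlen);
	else
		printf("not enough space\n");
	return 0;
}
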
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h index 7c404a6b0ae3..1d0f48a501a3 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h | |||
| @@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t; | |||
| 56 | #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/ | 56 | #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/ |
| 57 | #define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */ | 57 | #define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */ |
| 58 | #define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */ | 58 | #define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */ |
| 59 | 59 | #define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */ | |
| 60 | 60 | ||
| 61 | /* | 61 | /* |
| 62 | * Argument structure for xfs_alloc routines. | 62 | * Argument structure for xfs_alloc routines. |
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index af1ecb19121e..6622d46ddec3 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c | |||
| @@ -131,9 +131,6 @@ xfs_attr_get( | |||
| 131 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 131 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
| 132 | return -EIO; | 132 | return -EIO; |
| 133 | 133 | ||
| 134 | if (!xfs_inode_hasattr(ip)) | ||
| 135 | return -ENOATTR; | ||
| 136 | |||
| 137 | error = xfs_attr_args_init(&args, ip, name, flags); | 134 | error = xfs_attr_args_init(&args, ip, name, flags); |
| 138 | if (error) | 135 | if (error) |
| 139 | return error; | 136 | return error; |
| @@ -392,9 +389,6 @@ xfs_attr_remove( | |||
| 392 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 389 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
| 393 | return -EIO; | 390 | return -EIO; |
| 394 | 391 | ||
| 395 | if (!xfs_inode_hasattr(dp)) | ||
| 396 | return -ENOATTR; | ||
| 397 | |||
| 398 | error = xfs_attr_args_init(&args, dp, name, flags); | 392 | error = xfs_attr_args_init(&args, dp, name, flags); |
| 399 | if (error) | 393 | if (error) |
| 400 | return error; | 394 | return error; |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 2760bc3b2536..bfc00de5c6f1 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -3629,7 +3629,7 @@ xfs_bmap_btalloc( | |||
| 3629 | align = xfs_get_cowextsz_hint(ap->ip); | 3629 | align = xfs_get_cowextsz_hint(ap->ip); |
| 3630 | else if (xfs_alloc_is_userdata(ap->datatype)) | 3630 | else if (xfs_alloc_is_userdata(ap->datatype)) |
| 3631 | align = xfs_get_extsz_hint(ap->ip); | 3631 | align = xfs_get_extsz_hint(ap->ip); |
| 3632 | if (unlikely(align)) { | 3632 | if (align) { |
| 3633 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, | 3633 | error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, |
| 3634 | align, 0, ap->eof, 0, ap->conv, | 3634 | align, 0, ap->eof, 0, ap->conv, |
| 3635 | &ap->offset, &ap->length); | 3635 | &ap->offset, &ap->length); |
| @@ -3701,7 +3701,7 @@ xfs_bmap_btalloc( | |||
| 3701 | args.minlen = ap->minlen; | 3701 | args.minlen = ap->minlen; |
| 3702 | } | 3702 | } |
| 3703 | /* apply extent size hints if obtained earlier */ | 3703 | /* apply extent size hints if obtained earlier */ |
| 3704 | if (unlikely(align)) { | 3704 | if (align) { |
| 3705 | args.prod = align; | 3705 | args.prod = align; |
| 3706 | if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) | 3706 | if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) |
| 3707 | args.mod = (xfs_extlen_t)(args.prod - args.mod); | 3707 | args.mod = (xfs_extlen_t)(args.prod - args.mod); |
| @@ -3812,7 +3812,6 @@ xfs_bmap_btalloc( | |||
| 3812 | args.fsbno = 0; | 3812 | args.fsbno = 0; |
| 3813 | args.type = XFS_ALLOCTYPE_FIRST_AG; | 3813 | args.type = XFS_ALLOCTYPE_FIRST_AG; |
| 3814 | args.total = ap->minlen; | 3814 | args.total = ap->minlen; |
| 3815 | args.minleft = 0; | ||
| 3816 | if ((error = xfs_alloc_vextent(&args))) | 3815 | if ((error = xfs_alloc_vextent(&args))) |
| 3817 | return error; | 3816 | return error; |
| 3818 | ap->dfops->dop_low = true; | 3817 | ap->dfops->dop_low = true; |
| @@ -4344,8 +4343,6 @@ xfs_bmapi_allocate( | |||
| 4344 | if (error) | 4343 | if (error) |
| 4345 | return error; | 4344 | return error; |
| 4346 | 4345 | ||
| 4347 | if (bma->dfops->dop_low) | ||
| 4348 | bma->minleft = 0; | ||
| 4349 | if (bma->cur) | 4346 | if (bma->cur) |
| 4350 | bma->cur->bc_private.b.firstblock = *bma->firstblock; | 4347 | bma->cur->bc_private.b.firstblock = *bma->firstblock; |
| 4351 | if (bma->blkno == NULLFSBLOCK) | 4348 | if (bma->blkno == NULLFSBLOCK) |
| @@ -4517,8 +4514,6 @@ xfs_bmapi_write( | |||
| 4517 | int n; /* current extent index */ | 4514 | int n; /* current extent index */ |
| 4518 | xfs_fileoff_t obno; /* old block number (offset) */ | 4515 | xfs_fileoff_t obno; /* old block number (offset) */ |
| 4519 | int whichfork; /* data or attr fork */ | 4516 | int whichfork; /* data or attr fork */ |
| 4520 | char inhole; /* current location is hole in file */ | ||
| 4521 | char wasdelay; /* old extent was delayed */ | ||
| 4522 | 4517 | ||
| 4523 | #ifdef DEBUG | 4518 | #ifdef DEBUG |
| 4524 | xfs_fileoff_t orig_bno; /* original block number value */ | 4519 | xfs_fileoff_t orig_bno; /* original block number value */ |
| @@ -4606,22 +4601,44 @@ xfs_bmapi_write( | |||
| 4606 | bma.firstblock = firstblock; | 4601 | bma.firstblock = firstblock; |
| 4607 | 4602 | ||
| 4608 | while (bno < end && n < *nmap) { | 4603 | while (bno < end && n < *nmap) { |
| 4609 | inhole = eof || bma.got.br_startoff > bno; | 4604 | bool need_alloc = false, wasdelay = false; |
| 4610 | wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); | ||
| 4611 | 4605 | ||
| 4612 | /* | 4606 | /* in hole or beyond EOF? */ |
| 4613 | * Make sure we only reflink into a hole. | 4607 | if (eof || bma.got.br_startoff > bno) { |
| 4614 | */ | 4608 | if (flags & XFS_BMAPI_DELALLOC) { |
| 4615 | if (flags & XFS_BMAPI_REMAP) | 4609 | /* |
| 4616 | ASSERT(inhole); | 4610 | * For the COW fork we can reasonably get a |
| 4617 | if (flags & XFS_BMAPI_COWFORK) | 4611 | * request for converting an extent that races |
| 4618 | ASSERT(!inhole); | 4612 | * with other threads already having converted |
| 4613 | * part of it, as converting COW to | ||
| 4614 | * regular blocks is not protected using the | ||
| 4615 | * IOLOCK. | ||
| 4616 | */ | ||
| 4617 | ASSERT(flags & XFS_BMAPI_COWFORK); | ||
| 4618 | if (!(flags & XFS_BMAPI_COWFORK)) { | ||
| 4619 | error = -EIO; | ||
| 4620 | goto error0; | ||
| 4621 | } | ||
| 4622 | |||
| 4623 | if (eof || bno >= end) | ||
| 4624 | break; | ||
| 4625 | } else { | ||
| 4626 | need_alloc = true; | ||
| 4627 | } | ||
| 4628 | } else { | ||
| 4629 | /* | ||
| 4630 | * Make sure we only reflink into a hole. | ||
| 4631 | */ | ||
| 4632 | ASSERT(!(flags & XFS_BMAPI_REMAP)); | ||
| 4633 | if (isnullstartblock(bma.got.br_startblock)) | ||
| 4634 | wasdelay = true; | ||
| 4635 | } | ||
| 4619 | 4636 | ||
| 4620 | /* | 4637 | /* |
| 4621 | * First, deal with the hole before the allocated space | 4638 | * First, deal with the hole before the allocated space |
| 4622 | * that we found, if any. | 4639 | * that we found, if any. |
| 4623 | */ | 4640 | */ |
| 4624 | if (inhole || wasdelay) { | 4641 | if (need_alloc || wasdelay) { |
| 4625 | bma.eof = eof; | 4642 | bma.eof = eof; |
| 4626 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); | 4643 | bma.conv = !!(flags & XFS_BMAPI_CONVERT); |
| 4627 | bma.wasdel = wasdelay; | 4644 | bma.wasdel = wasdelay; |
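
The rewritten hole/delalloc classification above can be read as a small decision table: a hole with XFS_BMAPI_DELALLOC set means there is nothing to convert (only the COW fork may legitimately race into this case), a hole without it needs a fresh allocation, and an existing delalloc extent is converted via the wasdelay path. A hedged stand-alone rendering of that table, not the xfs_bmapi_write() code itself:

/*
 * Hedged decision-table sketch (plain C): how the hunk above chooses
 * between allocating, converting delalloc space, and bailing out.
 */
#include <stdio.h>
#include <stdbool.h>

struct decision { bool need_alloc, wasdelay, stop; };

static struct decision classify(bool in_hole, bool is_delalloc, bool delalloc_only)
{
	struct decision d = { false, false, false };

	if (in_hole) {
		if (delalloc_only)
			d.stop = true;		/* nothing to convert here */
		else
			d.need_alloc = true;	/* allocate a new extent */
	} else if (is_delalloc) {
		d.wasdelay = true;		/* convert the delalloc extent */
	}
	return d;
}

int main(void)
{
	struct decision d = classify(true, false, true);

	printf("hole + DELALLOC-only: need_alloc=%d wasdelay=%d stop=%d\n",
	       d.need_alloc, d.wasdelay, d.stop);
	return 0;
}
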
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index cecd094404cc..cdef87db5262 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h | |||
| @@ -110,6 +110,9 @@ struct xfs_extent_free_item | |||
| 110 | /* Map something in the CoW fork. */ | 110 | /* Map something in the CoW fork. */ |
| 111 | #define XFS_BMAPI_COWFORK 0x200 | 111 | #define XFS_BMAPI_COWFORK 0x200 |
| 112 | 112 | ||
| 113 | /* Only convert delalloc space, don't allocate entirely new extents */ | ||
| 114 | #define XFS_BMAPI_DELALLOC 0x400 | ||
| 115 | |||
| 113 | #define XFS_BMAPI_FLAGS \ | 116 | #define XFS_BMAPI_FLAGS \ |
| 114 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ | 117 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ |
| 115 | { XFS_BMAPI_METADATA, "METADATA" }, \ | 118 | { XFS_BMAPI_METADATA, "METADATA" }, \ |
| @@ -120,7 +123,8 @@ struct xfs_extent_free_item | |||
| 120 | { XFS_BMAPI_CONVERT, "CONVERT" }, \ | 123 | { XFS_BMAPI_CONVERT, "CONVERT" }, \ |
| 121 | { XFS_BMAPI_ZERO, "ZERO" }, \ | 124 | { XFS_BMAPI_ZERO, "ZERO" }, \ |
| 122 | { XFS_BMAPI_REMAP, "REMAP" }, \ | 125 | { XFS_BMAPI_REMAP, "REMAP" }, \ |
| 123 | { XFS_BMAPI_COWFORK, "COWFORK" } | 126 | { XFS_BMAPI_COWFORK, "COWFORK" }, \ |
| 127 | { XFS_BMAPI_DELALLOC, "DELALLOC" } | ||
| 124 | 128 | ||
| 125 | 129 | ||
| 126 | static inline int xfs_bmapi_aflag(int w) | 130 | static inline int xfs_bmapi_aflag(int w) |
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index d6330c297ca0..d9be241fc86f 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c | |||
| @@ -502,12 +502,11 @@ try_another_ag: | |||
| 502 | if (args.fsbno == NULLFSBLOCK && args.minleft) { | 502 | if (args.fsbno == NULLFSBLOCK && args.minleft) { |
| 503 | /* | 503 | /* |
| 504 | * Could not find an AG with enough free space to satisfy | 504 | * Could not find an AG with enough free space to satisfy |
| 505 | * a full btree split. Try again without minleft and if | 505 | * a full btree split. Try again and if |
| 506 | * successful activate the lowspace algorithm. | 506 | * successful activate the lowspace algorithm. |
| 507 | */ | 507 | */ |
| 508 | args.fsbno = 0; | 508 | args.fsbno = 0; |
| 509 | args.type = XFS_ALLOCTYPE_FIRST_AG; | 509 | args.type = XFS_ALLOCTYPE_FIRST_AG; |
| 510 | args.minleft = 0; | ||
| 511 | error = xfs_alloc_vextent(&args); | 510 | error = xfs_alloc_vextent(&args); |
| 512 | if (error) | 511 | if (error) |
| 513 | goto error0; | 512 | goto error0; |
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c index c58d72c220f5..2f389d366e93 100644 --- a/fs/xfs/libxfs/xfs_dir2.c +++ b/fs/xfs/libxfs/xfs_dir2.c | |||
| @@ -36,21 +36,29 @@ | |||
| 36 | struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR }; | 36 | struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR }; |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
| 39 | * @mode, if set, indicates that the type field needs to be set up. | 39 | * Convert inode mode to directory entry filetype |
| 40 | * This uses the transformation from file mode to DT_* as defined in linux/fs.h | ||
| 41 | * for file type specification. This will be propagated into the directory | ||
| 42 | * structure if appropriate for the given operation and filesystem config. | ||
| 43 | */ | 40 | */ |
| 44 | const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = { | 41 | unsigned char xfs_mode_to_ftype(int mode) |
| 45 | [0] = XFS_DIR3_FT_UNKNOWN, | 42 | { |
| 46 | [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE, | 43 | switch (mode & S_IFMT) { |
| 47 | [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR, | 44 | case S_IFREG: |
| 48 | [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV, | 45 | return XFS_DIR3_FT_REG_FILE; |
| 49 | [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV, | 46 | case S_IFDIR: |
| 50 | [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO, | 47 | return XFS_DIR3_FT_DIR; |
| 51 | [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK, | 48 | case S_IFCHR: |
| 52 | [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK, | 49 | return XFS_DIR3_FT_CHRDEV; |
| 53 | }; | 50 | case S_IFBLK: |
| 51 | return XFS_DIR3_FT_BLKDEV; | ||
| 52 | case S_IFIFO: | ||
| 53 | return XFS_DIR3_FT_FIFO; | ||
| 54 | case S_IFSOCK: | ||
| 55 | return XFS_DIR3_FT_SOCK; | ||
| 56 | case S_IFLNK: | ||
| 57 | return XFS_DIR3_FT_SYMLINK; | ||
| 58 | default: | ||
| 59 | return XFS_DIR3_FT_UNKNOWN; | ||
| 60 | } | ||
| 61 | } | ||
| 54 | 62 | ||
| 55 | /* | 63 | /* |
| 56 | * ASCII case-insensitive (ie. A-Z) support for directories that was | 64 | * ASCII case-insensitive (ie. A-Z) support for directories that was |
| @@ -631,7 +639,8 @@ xfs_dir2_isblock( | |||
| 631 | if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK))) | 639 | if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK))) |
| 632 | return rval; | 640 | return rval; |
| 633 | rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize; | 641 | rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize; |
| 634 | ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize); | 642 | if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize) |
| 643 | return -EFSCORRUPTED; | ||
| 635 | *vp = rval; | 644 | *vp = rval; |
| 636 | return 0; | 645 | return 0; |
| 637 | } | 646 | } |
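
Replacing the xfs_mode_to_ftype[] table with a switch means any mode whose S_IFMT bits are not one of the seven known types falls through to XFS_DIR3_FT_UNKNOWN instead of potentially indexing past the end of the old table; the xfs_dinode_verify() change further down depends on exactly that property. A stand-alone copy of the mapping with local FT_* constants standing in for the XFS_DIR3_FT_* values:

/*
 * Hedged stand-alone copy of the mode-to-filetype mapping above, plus a
 * tiny check that a garbage mode maps to "unknown".
 */
#include <stdio.h>
#include <sys/stat.h>

enum { FT_UNKNOWN, FT_REG, FT_DIR, FT_CHRDEV, FT_BLKDEV, FT_FIFO, FT_SOCK, FT_SYMLINK };

static unsigned char mode_to_ftype(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:	return FT_REG;
	case S_IFDIR:	return FT_DIR;
	case S_IFCHR:	return FT_CHRDEV;
	case S_IFBLK:	return FT_BLKDEV;
	case S_IFIFO:	return FT_FIFO;
	case S_IFSOCK:	return FT_SOCK;
	case S_IFLNK:	return FT_SYMLINK;
	default:	return FT_UNKNOWN;
	}
}

int main(void)
{
	printf("regular file -> %d\n", mode_to_ftype(S_IFREG | 0644));
	printf("bogus mode   -> %d\n", mode_to_ftype(0xf000));	/* invalid S_IFMT bits */
	return 0;
}
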
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index 0197590fa7d7..d6e6d9d16f6c 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h | |||
| @@ -18,6 +18,9 @@ | |||
| 18 | #ifndef __XFS_DIR2_H__ | 18 | #ifndef __XFS_DIR2_H__ |
| 19 | #define __XFS_DIR2_H__ | 19 | #define __XFS_DIR2_H__ |
| 20 | 20 | ||
| 21 | #include "xfs_da_format.h" | ||
| 22 | #include "xfs_da_btree.h" | ||
| 23 | |||
| 21 | struct xfs_defer_ops; | 24 | struct xfs_defer_ops; |
| 22 | struct xfs_da_args; | 25 | struct xfs_da_args; |
| 23 | struct xfs_inode; | 26 | struct xfs_inode; |
| @@ -32,10 +35,9 @@ struct xfs_dir2_data_unused; | |||
| 32 | extern struct xfs_name xfs_name_dotdot; | 35 | extern struct xfs_name xfs_name_dotdot; |
| 33 | 36 | ||
| 34 | /* | 37 | /* |
| 35 | * directory filetype conversion tables. | 38 | * Convert inode mode to directory entry filetype |
| 36 | */ | 39 | */ |
| 37 | #define S_SHIFT 12 | 40 | extern unsigned char xfs_mode_to_ftype(int mode); |
| 38 | extern const unsigned char xfs_mode_to_ftype[]; | ||
| 39 | 41 | ||
| 40 | /* | 42 | /* |
| 41 | * directory operations vector for encode/decode routines | 43 | * directory operations vector for encode/decode routines |
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 0fd086d03d41..7c471881c9a6 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c | |||
| @@ -82,11 +82,12 @@ xfs_finobt_set_root( | |||
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | STATIC int | 84 | STATIC int |
| 85 | xfs_inobt_alloc_block( | 85 | __xfs_inobt_alloc_block( |
| 86 | struct xfs_btree_cur *cur, | 86 | struct xfs_btree_cur *cur, |
| 87 | union xfs_btree_ptr *start, | 87 | union xfs_btree_ptr *start, |
| 88 | union xfs_btree_ptr *new, | 88 | union xfs_btree_ptr *new, |
| 89 | int *stat) | 89 | int *stat, |
| 90 | enum xfs_ag_resv_type resv) | ||
| 90 | { | 91 | { |
| 91 | xfs_alloc_arg_t args; /* block allocation args */ | 92 | xfs_alloc_arg_t args; /* block allocation args */ |
| 92 | int error; /* error return value */ | 93 | int error; /* error return value */ |
| @@ -103,6 +104,7 @@ xfs_inobt_alloc_block( | |||
| 103 | args.maxlen = 1; | 104 | args.maxlen = 1; |
| 104 | args.prod = 1; | 105 | args.prod = 1; |
| 105 | args.type = XFS_ALLOCTYPE_NEAR_BNO; | 106 | args.type = XFS_ALLOCTYPE_NEAR_BNO; |
| 107 | args.resv = resv; | ||
| 106 | 108 | ||
| 107 | error = xfs_alloc_vextent(&args); | 109 | error = xfs_alloc_vextent(&args); |
| 108 | if (error) { | 110 | if (error) { |
| @@ -123,6 +125,27 @@ xfs_inobt_alloc_block( | |||
| 123 | } | 125 | } |
| 124 | 126 | ||
| 125 | STATIC int | 127 | STATIC int |
| 128 | xfs_inobt_alloc_block( | ||
| 129 | struct xfs_btree_cur *cur, | ||
| 130 | union xfs_btree_ptr *start, | ||
| 131 | union xfs_btree_ptr *new, | ||
| 132 | int *stat) | ||
| 133 | { | ||
| 134 | return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE); | ||
| 135 | } | ||
| 136 | |||
| 137 | STATIC int | ||
| 138 | xfs_finobt_alloc_block( | ||
| 139 | struct xfs_btree_cur *cur, | ||
| 140 | union xfs_btree_ptr *start, | ||
| 141 | union xfs_btree_ptr *new, | ||
| 142 | int *stat) | ||
| 143 | { | ||
| 144 | return __xfs_inobt_alloc_block(cur, start, new, stat, | ||
| 145 | XFS_AG_RESV_METADATA); | ||
| 146 | } | ||
| 147 | |||
| 148 | STATIC int | ||
| 126 | xfs_inobt_free_block( | 149 | xfs_inobt_free_block( |
| 127 | struct xfs_btree_cur *cur, | 150 | struct xfs_btree_cur *cur, |
| 128 | struct xfs_buf *bp) | 151 | struct xfs_buf *bp) |
| @@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = { | |||
| 328 | 351 | ||
| 329 | .dup_cursor = xfs_inobt_dup_cursor, | 352 | .dup_cursor = xfs_inobt_dup_cursor, |
| 330 | .set_root = xfs_finobt_set_root, | 353 | .set_root = xfs_finobt_set_root, |
| 331 | .alloc_block = xfs_inobt_alloc_block, | 354 | .alloc_block = xfs_finobt_alloc_block, |
| 332 | .free_block = xfs_inobt_free_block, | 355 | .free_block = xfs_inobt_free_block, |
| 333 | .get_minrecs = xfs_inobt_get_minrecs, | 356 | .get_minrecs = xfs_inobt_get_minrecs, |
| 334 | .get_maxrecs = xfs_inobt_get_maxrecs, | 357 | .get_maxrecs = xfs_inobt_get_maxrecs, |
| @@ -480,3 +503,64 @@ xfs_inobt_rec_check_count( | |||
| 480 | return 0; | 503 | return 0; |
| 481 | } | 504 | } |
| 482 | #endif /* DEBUG */ | 505 | #endif /* DEBUG */ |
| 506 | |||
| 507 | static xfs_extlen_t | ||
| 508 | xfs_inobt_max_size( | ||
| 509 | struct xfs_mount *mp) | ||
| 510 | { | ||
| 511 | /* Bail out if we're uninitialized, which can happen in mkfs. */ | ||
| 512 | if (mp->m_inobt_mxr[0] == 0) | ||
| 513 | return 0; | ||
| 514 | |||
| 515 | return xfs_btree_calc_size(mp, mp->m_inobt_mnr, | ||
| 516 | (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock / | ||
| 517 | XFS_INODES_PER_CHUNK); | ||
| 518 | } | ||
| 519 | |||
| 520 | static int | ||
| 521 | xfs_inobt_count_blocks( | ||
| 522 | struct xfs_mount *mp, | ||
| 523 | xfs_agnumber_t agno, | ||
| 524 | xfs_btnum_t btnum, | ||
| 525 | xfs_extlen_t *tree_blocks) | ||
| 526 | { | ||
| 527 | struct xfs_buf *agbp; | ||
| 528 | struct xfs_btree_cur *cur; | ||
| 529 | int error; | ||
| 530 | |||
| 531 | error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); | ||
| 532 | if (error) | ||
| 533 | return error; | ||
| 534 | |||
| 535 | cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum); | ||
| 536 | error = xfs_btree_count_blocks(cur, tree_blocks); | ||
| 537 | xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); | ||
| 538 | xfs_buf_relse(agbp); | ||
| 539 | |||
| 540 | return error; | ||
| 541 | } | ||
| 542 | |||
| 543 | /* | ||
| 544 | * Figure out how many blocks to reserve and how many are used by this btree. | ||
| 545 | */ | ||
| 546 | int | ||
| 547 | xfs_finobt_calc_reserves( | ||
| 548 | struct xfs_mount *mp, | ||
| 549 | xfs_agnumber_t agno, | ||
| 550 | xfs_extlen_t *ask, | ||
| 551 | xfs_extlen_t *used) | ||
| 552 | { | ||
| 553 | xfs_extlen_t tree_len = 0; | ||
| 554 | int error; | ||
| 555 | |||
| 556 | if (!xfs_sb_version_hasfinobt(&mp->m_sb)) | ||
| 557 | return 0; | ||
| 558 | |||
| 559 | error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len); | ||
| 560 | if (error) | ||
| 561 | return error; | ||
| 562 | |||
| 563 | *ask += xfs_inobt_max_size(mp); | ||
| 564 | *used += tree_len; | ||
| 565 | return 0; | ||
| 566 | } | ||
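
xfs_finobt_calc_reserves() above asks for the worst-case size of the free inode btree (via xfs_btree_calc_size() with the minimum records per block) and reports the blocks currently used by walking the tree. The arithmetic behind such a worst-case estimate, as a hedged stand-alone sketch rather than the libxfs helper:

/*
 * Hedged arithmetic sketch in the spirit of xfs_btree_calc_size():
 * assume every node holds only the minimum number of records and sum
 * the blocks level by level.
 */
#include <stdio.h>

static unsigned long btree_worst_case_blocks(unsigned long records,
					     unsigned long min_recs_per_block)
{
	unsigned long blocks = 0;

	do {
		/* blocks needed at this level, rounding up */
		records = (records + min_recs_per_block - 1) / min_recs_per_block;
		blocks += records;
	} while (records > 1);

	return blocks;
}

int main(void)
{
	/* illustrative numbers: one record per 64-inode chunk, minimum 16 records/block */
	printf("worst case: %lu blocks\n",
	       btree_worst_case_blocks(1024 * 1024 / 64, 16));
	return 0;
}
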
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h index bd88453217ce..aa81e2e63f3f 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.h +++ b/fs/xfs/libxfs/xfs_ialloc_btree.h | |||
| @@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *, | |||
| 72 | #define xfs_inobt_rec_check_count(mp, rec) 0 | 72 | #define xfs_inobt_rec_check_count(mp, rec) 0 |
| 73 | #endif /* DEBUG */ | 73 | #endif /* DEBUG */ |
| 74 | 74 | ||
| 75 | int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno, | ||
| 76 | xfs_extlen_t *ask, xfs_extlen_t *used); | ||
| 77 | |||
| 75 | #endif /* __XFS_IALLOC_BTREE_H__ */ | 78 | #endif /* __XFS_IALLOC_BTREE_H__ */ |
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index dd483e2767f7..d93f9d918cfc 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include "xfs_icache.h" | 29 | #include "xfs_icache.h" |
| 30 | #include "xfs_trans.h" | 30 | #include "xfs_trans.h" |
| 31 | #include "xfs_ialloc.h" | 31 | #include "xfs_ialloc.h" |
| 32 | #include "xfs_dir2.h" | ||
| 32 | 33 | ||
| 33 | /* | 34 | /* |
| 34 | * Check that none of the inode's in the buffer have a next | 35 | * Check that none of the inode's in the buffer have a next |
| @@ -386,6 +387,7 @@ xfs_dinode_verify( | |||
| 386 | xfs_ino_t ino, | 387 | xfs_ino_t ino, |
| 387 | struct xfs_dinode *dip) | 388 | struct xfs_dinode *dip) |
| 388 | { | 389 | { |
| 390 | uint16_t mode; | ||
| 389 | uint16_t flags; | 391 | uint16_t flags; |
| 390 | uint64_t flags2; | 392 | uint64_t flags2; |
| 391 | 393 | ||
| @@ -396,8 +398,12 @@ xfs_dinode_verify( | |||
| 396 | if (be64_to_cpu(dip->di_size) & (1ULL << 63)) | 398 | if (be64_to_cpu(dip->di_size) & (1ULL << 63)) |
| 397 | return false; | 399 | return false; |
| 398 | 400 | ||
| 399 | /* No zero-length symlinks. */ | 401 | mode = be16_to_cpu(dip->di_mode); |
| 400 | if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) | 402 | if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN) |
| 403 | return false; | ||
| 404 | |||
| 405 | /* No zero-length symlinks/dirs. */ | ||
| 406 | if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0) | ||
| 401 | return false; | 407 | return false; |
| 402 | 408 | ||
| 403 | /* only version 3 or greater inodes are extensively verified here */ | 409 | /* only version 3 or greater inodes are extensively verified here */ |
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index 6fb2215f8ff7..50add5272807 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c | |||
| @@ -409,13 +409,14 @@ xfs_refcountbt_calc_size( | |||
| 409 | */ | 409 | */ |
| 410 | xfs_extlen_t | 410 | xfs_extlen_t |
| 411 | xfs_refcountbt_max_size( | 411 | xfs_refcountbt_max_size( |
| 412 | struct xfs_mount *mp) | 412 | struct xfs_mount *mp, |
| 413 | xfs_agblock_t agblocks) | ||
| 413 | { | 414 | { |
| 414 | /* Bail out if we're uninitialized, which can happen in mkfs. */ | 415 | /* Bail out if we're uninitialized, which can happen in mkfs. */ |
| 415 | if (mp->m_refc_mxr[0] == 0) | 416 | if (mp->m_refc_mxr[0] == 0) |
| 416 | return 0; | 417 | return 0; |
| 417 | 418 | ||
| 418 | return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks); | 419 | return xfs_refcountbt_calc_size(mp, agblocks); |
| 419 | } | 420 | } |
| 420 | 421 | ||
| 421 | /* | 422 | /* |
| @@ -430,22 +431,24 @@ xfs_refcountbt_calc_reserves( | |||
| 430 | { | 431 | { |
| 431 | struct xfs_buf *agbp; | 432 | struct xfs_buf *agbp; |
| 432 | struct xfs_agf *agf; | 433 | struct xfs_agf *agf; |
| 434 | xfs_agblock_t agblocks; | ||
| 433 | xfs_extlen_t tree_len; | 435 | xfs_extlen_t tree_len; |
| 434 | int error; | 436 | int error; |
| 435 | 437 | ||
| 436 | if (!xfs_sb_version_hasreflink(&mp->m_sb)) | 438 | if (!xfs_sb_version_hasreflink(&mp->m_sb)) |
| 437 | return 0; | 439 | return 0; |
| 438 | 440 | ||
| 439 | *ask += xfs_refcountbt_max_size(mp); | ||
| 440 | 441 | ||
| 441 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); | 442 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); |
| 442 | if (error) | 443 | if (error) |
| 443 | return error; | 444 | return error; |
| 444 | 445 | ||
| 445 | agf = XFS_BUF_TO_AGF(agbp); | 446 | agf = XFS_BUF_TO_AGF(agbp); |
| 447 | agblocks = be32_to_cpu(agf->agf_length); | ||
| 446 | tree_len = be32_to_cpu(agf->agf_refcount_blocks); | 448 | tree_len = be32_to_cpu(agf->agf_refcount_blocks); |
| 447 | xfs_buf_relse(agbp); | 449 | xfs_buf_relse(agbp); |
| 448 | 450 | ||
| 451 | *ask += xfs_refcountbt_max_size(mp, agblocks); | ||
| 449 | *used += tree_len; | 452 | *used += tree_len; |
| 450 | 453 | ||
| 451 | return error; | 454 | return error; |
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h index 3be7768bd51a..9db008b955b7 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.h +++ b/fs/xfs/libxfs/xfs_refcount_btree.h | |||
| @@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp); | |||
| 66 | 66 | ||
| 67 | extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp, | 67 | extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp, |
| 68 | unsigned long long len); | 68 | unsigned long long len); |
| 69 | extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp); | 69 | extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp, |
| 70 | xfs_agblock_t agblocks); | ||
| 70 | 71 | ||
| 71 | extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp, | 72 | extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp, |
| 72 | xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); | 73 | xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); |
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index de25771764ba..74e5a54bc428 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c | |||
| @@ -550,13 +550,14 @@ xfs_rmapbt_calc_size( | |||
| 550 | */ | 550 | */ |
| 551 | xfs_extlen_t | 551 | xfs_extlen_t |
| 552 | xfs_rmapbt_max_size( | 552 | xfs_rmapbt_max_size( |
| 553 | struct xfs_mount *mp) | 553 | struct xfs_mount *mp, |
| 554 | xfs_agblock_t agblocks) | ||
| 554 | { | 555 | { |
| 555 | /* Bail out if we're uninitialized, which can happen in mkfs. */ | 556 | /* Bail out if we're uninitialized, which can happen in mkfs. */ |
| 556 | if (mp->m_rmap_mxr[0] == 0) | 557 | if (mp->m_rmap_mxr[0] == 0) |
| 557 | return 0; | 558 | return 0; |
| 558 | 559 | ||
| 559 | return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks); | 560 | return xfs_rmapbt_calc_size(mp, agblocks); |
| 560 | } | 561 | } |
| 561 | 562 | ||
| 562 | /* | 563 | /* |
| @@ -571,25 +572,24 @@ xfs_rmapbt_calc_reserves( | |||
| 571 | { | 572 | { |
| 572 | struct xfs_buf *agbp; | 573 | struct xfs_buf *agbp; |
| 573 | struct xfs_agf *agf; | 574 | struct xfs_agf *agf; |
| 574 | xfs_extlen_t pool_len; | 575 | xfs_agblock_t agblocks; |
| 575 | xfs_extlen_t tree_len; | 576 | xfs_extlen_t tree_len; |
| 576 | int error; | 577 | int error; |
| 577 | 578 | ||
| 578 | if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) | 579 | if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) |
| 579 | return 0; | 580 | return 0; |
| 580 | 581 | ||
| 581 | /* Reserve 1% of the AG or enough for 1 block per record. */ | ||
| 582 | pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp)); | ||
| 583 | *ask += pool_len; | ||
| 584 | |||
| 585 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); | 582 | error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); |
| 586 | if (error) | 583 | if (error) |
| 587 | return error; | 584 | return error; |
| 588 | 585 | ||
| 589 | agf = XFS_BUF_TO_AGF(agbp); | 586 | agf = XFS_BUF_TO_AGF(agbp); |
| 587 | agblocks = be32_to_cpu(agf->agf_length); | ||
| 590 | tree_len = be32_to_cpu(agf->agf_rmap_blocks); | 588 | tree_len = be32_to_cpu(agf->agf_rmap_blocks); |
| 591 | xfs_buf_relse(agbp); | 589 | xfs_buf_relse(agbp); |
| 592 | 590 | ||
| 591 | /* Reserve 1% of the AG or enough for 1 block per record. */ | ||
| 592 | *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks)); | ||
| 593 | *used += tree_len; | 593 | *used += tree_len; |
| 594 | 594 | ||
| 595 | return error; | 595 | return error; |
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h index 2a9ac472fb15..19c08e933049 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.h +++ b/fs/xfs/libxfs/xfs_rmap_btree.h | |||
| @@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp); | |||
| 60 | 60 | ||
| 61 | extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp, | 61 | extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp, |
| 62 | unsigned long long len); | 62 | unsigned long long len); |
| 63 | extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp); | 63 | extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp, |
| 64 | xfs_agblock_t agblocks); | ||
| 64 | 65 | ||
| 65 | extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, | 66 | extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, |
| 66 | xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); | 67 | xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); |
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 2580262e4ea0..584ec896a533 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c | |||
| @@ -242,7 +242,7 @@ xfs_mount_validate_sb( | |||
| 242 | sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || | 242 | sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || |
| 243 | sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || | 243 | sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || |
| 244 | sbp->sb_blocksize != (1 << sbp->sb_blocklog) || | 244 | sbp->sb_blocksize != (1 << sbp->sb_blocklog) || |
| 245 | sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG || | 245 | sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || |
| 246 | sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || | 246 | sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || |
| 247 | sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || | 247 | sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || |
| 248 | sbp->sb_inodelog < XFS_DINODE_MIN_LOG || | 248 | sbp->sb_inodelog < XFS_DINODE_MIN_LOG || |
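
The superblock check above now rejects sb_dirblklog + sb_blocklog > XFS_MAX_BLOCKSIZE_LOG rather than testing sb_dirblklog alone, because the directory block size is blocksize << dirblklog and it is the sum of the two logs that must stay within the supported maximum. A small illustration of that bound (the constant mirrors XFS_MAX_BLOCKSIZE_LOG and is assumed to be 16 here):

/*
 * Hedged arithmetic sketch: the directory block size is
 * 1 << (blocklog + dirblklog), so the sum of the logs is what must be
 * bounded, not dirblklog alone.
 */
#include <stdio.h>

#define MAX_BLOCKSIZE_LOG 16	/* stand-in for XFS_MAX_BLOCKSIZE_LOG */

static int dir_block_size_valid(unsigned blocklog, unsigned dirblklog)
{
	return blocklog + dirblklog <= MAX_BLOCKSIZE_LOG;
}

int main(void)
{
	/* 4k filesystem blocks with 64k directory blocks: 12 + 4 = 16, still valid */
	printf("blocklog=12 dirblklog=4 -> %s\n",
	       dir_block_size_valid(12, 4) ? "valid" : "invalid");
	/* 4k blocks with dirblklog=8 would mean 1MB directory blocks: rejected */
	printf("blocklog=12 dirblklog=8 -> %s\n",
	       dir_block_size_valid(12, 8) ? "valid" : "invalid");
	return 0;
}
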
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 0f56fcd3a5d5..631e7c0e0a29 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
| @@ -1152,19 +1152,22 @@ xfs_vm_releasepage( | |||
| 1152 | * block_invalidatepage() can send pages that are still marked dirty | 1152 | * block_invalidatepage() can send pages that are still marked dirty |
| 1153 | * but otherwise have invalidated buffers. | 1153 | * but otherwise have invalidated buffers. |
| 1154 | * | 1154 | * |
| 1155 | * We've historically freed buffers on the latter. Instead, quietly | 1155 | * We want to release the latter to avoid unnecessary buildup of the |
| 1156 | * filter out all dirty pages to avoid spurious buffer state warnings. | 1156 | * LRU, skip the former and warn if we've left any lingering |
| 1157 | * This can likely be removed once shrink_active_list() is fixed. | 1157 | * delalloc/unwritten buffers on clean pages. Skip pages with delalloc |
| 1158 | * or unwritten buffers and warn if the page is not dirty. Otherwise | ||
| 1159 | * try to release the buffers. | ||
| 1158 | */ | 1160 | */ |
| 1159 | if (PageDirty(page)) | ||
| 1160 | return 0; | ||
| 1161 | |||
| 1162 | xfs_count_page_state(page, &delalloc, &unwritten); | 1161 | xfs_count_page_state(page, &delalloc, &unwritten); |
| 1163 | 1162 | ||
| 1164 | if (WARN_ON_ONCE(delalloc)) | 1163 | if (delalloc) { |
| 1164 | WARN_ON_ONCE(!PageDirty(page)); | ||
| 1165 | return 0; | 1165 | return 0; |
| 1166 | if (WARN_ON_ONCE(unwritten)) | 1166 | } |
| 1167 | if (unwritten) { | ||
| 1168 | WARN_ON_ONCE(!PageDirty(page)); | ||
| 1167 | return 0; | 1169 | return 0; |
| 1170 | } | ||
| 1168 | 1171 | ||
| 1169 | return try_to_free_buffers(page); | 1172 | return try_to_free_buffers(page); |
| 1170 | } | 1173 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index b9abce524c33..c1417919ab0a 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
| @@ -528,7 +528,6 @@ xfs_getbmap( | |||
| 528 | xfs_bmbt_irec_t *map; /* buffer for user's data */ | 528 | xfs_bmbt_irec_t *map; /* buffer for user's data */ |
| 529 | xfs_mount_t *mp; /* file system mount point */ | 529 | xfs_mount_t *mp; /* file system mount point */ |
| 530 | int nex; /* # of user extents can do */ | 530 | int nex; /* # of user extents can do */ |
| 531 | int nexleft; /* # of user extents left */ | ||
| 532 | int subnex; /* # of bmapi's can do */ | 531 | int subnex; /* # of bmapi's can do */ |
| 533 | int nmap; /* number of map entries */ | 532 | int nmap; /* number of map entries */ |
| 534 | struct getbmapx *out; /* output structure */ | 533 | struct getbmapx *out; /* output structure */ |
| @@ -686,10 +685,8 @@ xfs_getbmap( | |||
| 686 | goto out_free_map; | 685 | goto out_free_map; |
| 687 | } | 686 | } |
| 688 | 687 | ||
| 689 | nexleft = nex; | ||
| 690 | |||
| 691 | do { | 688 | do { |
| 692 | nmap = (nexleft > subnex) ? subnex : nexleft; | 689 | nmap = (nex > subnex) ? subnex : nex; |
| 693 | error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), | 690 | error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), |
| 694 | XFS_BB_TO_FSB(mp, bmv->bmv_length), | 691 | XFS_BB_TO_FSB(mp, bmv->bmv_length), |
| 695 | map, &nmap, bmapi_flags); | 692 | map, &nmap, bmapi_flags); |
| @@ -697,8 +694,8 @@ xfs_getbmap( | |||
| 697 | goto out_free_map; | 694 | goto out_free_map; |
| 698 | ASSERT(nmap <= subnex); | 695 | ASSERT(nmap <= subnex); |
| 699 | 696 | ||
| 700 | for (i = 0; i < nmap && nexleft && bmv->bmv_length && | 697 | for (i = 0; i < nmap && bmv->bmv_length && |
| 701 | cur_ext < bmv->bmv_count; i++) { | 698 | cur_ext < bmv->bmv_count - 1; i++) { |
| 702 | out[cur_ext].bmv_oflags = 0; | 699 | out[cur_ext].bmv_oflags = 0; |
| 703 | if (map[i].br_state == XFS_EXT_UNWRITTEN) | 700 | if (map[i].br_state == XFS_EXT_UNWRITTEN) |
| 704 | out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; | 701 | out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; |
| @@ -760,16 +757,27 @@ xfs_getbmap( | |||
| 760 | continue; | 757 | continue; |
| 761 | } | 758 | } |
| 762 | 759 | ||
| 760 | /* | ||
| 761 | * In order to report shared extents accurately, | ||
| 762 | * we report each distinct shared/unshared part | ||
| 763 | * of a single bmbt record using multiple bmap | ||
| 764 | * extents. To make that happen, we iterate the | ||
| 765 | * same map array item multiple times, each | ||
| 766 | * time trimming out the subextent that we just | ||
| 767 | * reported. | ||
| 768 | * | ||
| 769 | * Because of this, we must check the out array | ||
| 770 | * index (cur_ext) directly against bmv_count-1 | ||
| 771 | * to avoid overflows. | ||
| 772 | */ | ||
| 763 | if (inject_map.br_startblock != NULLFSBLOCK) { | 773 | if (inject_map.br_startblock != NULLFSBLOCK) { |
| 764 | map[i] = inject_map; | 774 | map[i] = inject_map; |
| 765 | i--; | 775 | i--; |
| 766 | } else | 776 | } |
| 767 | nexleft--; | ||
| 768 | bmv->bmv_entries++; | 777 | bmv->bmv_entries++; |
| 769 | cur_ext++; | 778 | cur_ext++; |
| 770 | } | 779 | } |
| 771 | } while (nmap && nexleft && bmv->bmv_length && | 780 | } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1); |
| 772 | cur_ext < bmv->bmv_count); | ||
| 773 | 781 | ||
| 774 | out_free_map: | 782 | out_free_map: |
| 775 | kmem_free(map); | 783 | kmem_free(map); |
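
The getbmap rework above drops the nexleft counter because one bmbt record may now be reported as several shared/unshared sub-extents, so the only bound that cannot overflow the user buffer is the output index itself (cur_ext < bmv_count - 1). A toy, compilable model of that bounding, with the sub-extent splitting reduced to an integer countdown (not the kernel loop):

	#include <stdio.h>

	#define OUT_MAX 4	/* mirrors bmv_count; the loop stops at OUT_MAX - 1 */

	int main(void)
	{
		int in[] = { 3, 1, 2 };		/* sub-extents left per input record */
		int out[OUT_MAX], cur_ext = 0;

		for (int i = 0; i < 3 && cur_ext < OUT_MAX - 1; i++) {
			out[cur_ext++] = i;	/* emit one sub-extent of record i */
			if (--in[i] > 0)
				i--;		/* record not exhausted: revisit it */
		}
		printf("emitted %d entries, first from record %d\n", cur_ext, out[0]);
		return 0;
	}
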
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 7f0a01f7b592..8c7d01b75922 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
| @@ -422,6 +422,7 @@ retry: | |||
| 422 | out_free_pages: | 422 | out_free_pages: |
| 423 | for (i = 0; i < bp->b_page_count; i++) | 423 | for (i = 0; i < bp->b_page_count; i++) |
| 424 | __free_page(bp->b_pages[i]); | 424 | __free_page(bp->b_pages[i]); |
| 425 | bp->b_flags &= ~_XBF_PAGES; | ||
| 425 | return error; | 426 | return error; |
| 426 | } | 427 | } |
| 427 | 428 | ||
| @@ -757,7 +758,7 @@ xfs_buf_readahead_map( | |||
| 757 | int nmaps, | 758 | int nmaps, |
| 758 | const struct xfs_buf_ops *ops) | 759 | const struct xfs_buf_ops *ops) |
| 759 | { | 760 | { |
| 760 | if (bdi_read_congested(target->bt_bdi)) | 761 | if (bdi_read_congested(target->bt_bdev->bd_bdi)) |
| 761 | return; | 762 | return; |
| 762 | 763 | ||
| 763 | xfs_buf_read_map(target, map, nmaps, | 764 | xfs_buf_read_map(target, map, nmaps, |
| @@ -1790,7 +1791,6 @@ xfs_alloc_buftarg( | |||
| 1790 | btp->bt_mount = mp; | 1791 | btp->bt_mount = mp; |
| 1791 | btp->bt_dev = bdev->bd_dev; | 1792 | btp->bt_dev = bdev->bd_dev; |
| 1792 | btp->bt_bdev = bdev; | 1793 | btp->bt_bdev = bdev; |
| 1793 | btp->bt_bdi = blk_get_backing_dev_info(bdev); | ||
| 1794 | 1794 | ||
| 1795 | if (xfs_setsize_buftarg_early(btp, bdev)) | 1795 | if (xfs_setsize_buftarg_early(btp, bdev)) |
| 1796 | goto error; | 1796 | goto error; |
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 8a9d3a9599f0..3c867e5a63e1 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
| @@ -109,7 +109,6 @@ typedef unsigned int xfs_buf_flags_t; | |||
| 109 | typedef struct xfs_buftarg { | 109 | typedef struct xfs_buftarg { |
| 110 | dev_t bt_dev; | 110 | dev_t bt_dev; |
| 111 | struct block_device *bt_bdev; | 111 | struct block_device *bt_bdev; |
| 112 | struct backing_dev_info *bt_bdi; | ||
| 113 | struct xfs_mount *bt_mount; | 112 | struct xfs_mount *bt_mount; |
| 114 | unsigned int bt_meta_sectorsize; | 113 | unsigned int bt_meta_sectorsize; |
| 115 | size_t bt_meta_sectormask; | 114 | size_t bt_meta_sectormask; |
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 7a30b8f11db7..9d06cc30e875 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
| @@ -710,6 +710,10 @@ xfs_dq_get_next_id( | |||
| 710 | /* Simple advance */ | 710 | /* Simple advance */ |
| 711 | next_id = *id + 1; | 711 | next_id = *id + 1; |
| 712 | 712 | ||
| 713 | /* If we'd wrap past the max ID, stop */ | ||
| 714 | if (next_id < *id) | ||
| 715 | return -ENOENT; | ||
| 716 | |||
| 713 | /* If new ID is within the current chunk, advancing it sufficed */ | 717 | /* If new ID is within the current chunk, advancing it sufficed */ |
| 714 | if (next_id % mp->m_quotainfo->qi_dqperchunk) { | 718 | if (next_id % mp->m_quotainfo->qi_dqperchunk) { |
| 715 | *id = next_id; | 719 | *id = next_id; |
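
The new check in xfs_dq_get_next_id() stops the quota-ID walk when incrementing the 32-bit ID would wrap back to zero, instead of scanning forever. A minimal standalone illustration of the same overflow test (plain C, not the kernel helper):

	#include <errno.h>
	#include <limits.h>
	#include <stdio.h>

	/* After id + 1, a result smaller than id means the unsigned
	 * counter wrapped, so the caller gets -ENOENT and stops. */
	static int next_quota_id(unsigned int id, unsigned int *next)
	{
		unsigned int n = id + 1;

		if (n < id)
			return -ENOENT;
		*next = n;
		return 0;
	}

	int main(void)
	{
		unsigned int n;

		printf("%d\n", next_quota_id(UINT_MAX, &n));	/* prints -ENOENT (-2 on Linux) */
		return 0;
	}
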
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 93d12fa2670d..242e8091296d 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
| @@ -631,6 +631,20 @@ xfs_growfs_data_private( | |||
| 631 | xfs_set_low_space_thresholds(mp); | 631 | xfs_set_low_space_thresholds(mp); |
| 632 | mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); | 632 | mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); |
| 633 | 633 | ||
| 634 | /* | ||
| 635 | * If we expanded the last AG, free the per-AG reservation | ||
| 636 | * so we can reinitialize it with the new size. | ||
| 637 | */ | ||
| 638 | if (new) { | ||
| 639 | struct xfs_perag *pag; | ||
| 640 | |||
| 641 | pag = xfs_perag_get(mp, agno); | ||
| 642 | error = xfs_ag_resv_free(pag); | ||
| 643 | xfs_perag_put(pag); | ||
| 644 | if (error) | ||
| 645 | goto out; | ||
| 646 | } | ||
| 647 | |||
| 634 | /* Reserve AG metadata blocks. */ | 648 | /* Reserve AG metadata blocks. */ |
| 635 | error = xfs_fs_reserve_ag_blocks(mp); | 649 | error = xfs_fs_reserve_ag_blocks(mp); |
| 636 | if (error && error != -ENOSPC) | 650 | if (error && error != -ENOSPC) |
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index ff4d6311c7f4..70ca4f608321 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c | |||
| @@ -1597,7 +1597,8 @@ xfs_inode_free_cowblocks( | |||
| 1597 | * If the mapping is dirty or under writeback we cannot touch the | 1597 | * If the mapping is dirty or under writeback we cannot touch the |
| 1598 | * CoW fork. Leave it alone if we're in the midst of a directio. | 1598 | * CoW fork. Leave it alone if we're in the midst of a directio. |
| 1599 | */ | 1599 | */ |
| 1600 | if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || | 1600 | if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || |
| 1601 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || | ||
| 1601 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || | 1602 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || |
| 1602 | atomic_read(&VFS_I(ip)->i_dio_count)) | 1603 | atomic_read(&VFS_I(ip)->i_dio_count)) |
| 1603 | return 0; | 1604 | return 0; |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b9557795eb74..de32f0fe47c8 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -1792,22 +1792,23 @@ xfs_inactive_ifree( | |||
| 1792 | int error; | 1792 | int error; |
| 1793 | 1793 | ||
| 1794 | /* | 1794 | /* |
| 1795 | * The ifree transaction might need to allocate blocks for record | 1795 | * We try to use a per-AG reservation for any block needed by the finobt |
| 1796 | * insertion to the finobt. We don't want to fail here at ENOSPC, so | 1796 | * tree, but as the finobt feature predates the per-AG reservation |
| 1797 | * allow ifree to dip into the reserved block pool if necessary. | 1797 | * support a degraded file system might not have enough space for the |
| 1798 | * | 1798 | * reservation at mount time. In that case try to dip into the reserved |
| 1799 | * Freeing large sets of inodes generally means freeing inode chunks, | 1799 | * pool and pray. |
| 1800 | * directory and file data blocks, so this should be relatively safe. | ||
| 1801 | * Only under severe circumstances should it be possible to free enough | ||
| 1802 | * inodes to exhaust the reserve block pool via finobt expansion while | ||
| 1803 | * at the same time not creating free space in the filesystem. | ||
| 1804 | * | 1800 | * |
| 1805 | * Send a warning if the reservation does happen to fail, as the inode | 1801 | * Send a warning if the reservation does happen to fail, as the inode |
| 1806 | * now remains allocated and sits on the unlinked list until the fs is | 1802 | * now remains allocated and sits on the unlinked list until the fs is |
| 1807 | * repaired. | 1803 | * repaired. |
| 1808 | */ | 1804 | */ |
| 1809 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, | 1805 | if (unlikely(mp->m_inotbt_nores)) { |
| 1810 | XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp); | 1806 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, |
| 1807 | XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, | ||
| 1808 | &tp); | ||
| 1809 | } else { | ||
| 1810 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); | ||
| 1811 | } | ||
| 1811 | if (error) { | 1812 | if (error) { |
| 1812 | if (error == -ENOSPC) { | 1813 | if (error == -ENOSPC) { |
| 1813 | xfs_warn_ratelimited(mp, | 1814 | xfs_warn_ratelimited(mp, |
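
The xfs_inactive_ifree() change above makes the explicit block reservation, and the permission to dip into the reserved pool, conditional on m_inotbt_nores, i.e. on the per-AG finobt reservation having failed at mount time. A hedged sketch of that policy decision with the transaction machinery stripped away (simplified types, not the kernel code):

	#include <stdbool.h>

	struct mount_state {
		bool inotbt_nores;	/* mirrors mp->m_inotbt_nores */
	};

	/* Return how many blocks the ifree transaction should reserve:
	 * none when the per-AG reservation already covers finobt growth,
	 * otherwise the explicit ifree space reservation. */
	static unsigned int
	ifree_block_reservation(const struct mount_state *m, unsigned int ifree_space_res)
	{
		return m->inotbt_nores ? ifree_space_res : 0;
	}
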
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 0d147428971e..1aa3abd67b36 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
| @@ -681,7 +681,7 @@ xfs_iomap_write_allocate( | |||
| 681 | xfs_trans_t *tp; | 681 | xfs_trans_t *tp; |
| 682 | int nimaps; | 682 | int nimaps; |
| 683 | int error = 0; | 683 | int error = 0; |
| 684 | int flags = 0; | 684 | int flags = XFS_BMAPI_DELALLOC; |
| 685 | int nres; | 685 | int nres; |
| 686 | 686 | ||
| 687 | if (whichfork == XFS_COW_FORK) | 687 | if (whichfork == XFS_COW_FORK) |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 308bebb6dfd2..22c16155f1b4 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
| @@ -98,12 +98,27 @@ xfs_init_security( | |||
| 98 | static void | 98 | static void |
| 99 | xfs_dentry_to_name( | 99 | xfs_dentry_to_name( |
| 100 | struct xfs_name *namep, | 100 | struct xfs_name *namep, |
| 101 | struct dentry *dentry) | ||
| 102 | { | ||
| 103 | namep->name = dentry->d_name.name; | ||
| 104 | namep->len = dentry->d_name.len; | ||
| 105 | namep->type = XFS_DIR3_FT_UNKNOWN; | ||
| 106 | } | ||
| 107 | |||
| 108 | static int | ||
| 109 | xfs_dentry_mode_to_name( | ||
| 110 | struct xfs_name *namep, | ||
| 101 | struct dentry *dentry, | 111 | struct dentry *dentry, |
| 102 | int mode) | 112 | int mode) |
| 103 | { | 113 | { |
| 104 | namep->name = dentry->d_name.name; | 114 | namep->name = dentry->d_name.name; |
| 105 | namep->len = dentry->d_name.len; | 115 | namep->len = dentry->d_name.len; |
| 106 | namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT]; | 116 | namep->type = xfs_mode_to_ftype(mode); |
| 117 | |||
| 118 | if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN)) | ||
| 119 | return -EFSCORRUPTED; | ||
| 120 | |||
| 121 | return 0; | ||
| 107 | } | 122 | } |
| 108 | 123 | ||
| 109 | STATIC void | 124 | STATIC void |
| @@ -119,7 +134,7 @@ xfs_cleanup_inode( | |||
| 119 | * xfs_init_security we must back out. | 134 | * xfs_init_security we must back out. |
| 120 | * ENOSPC can hit here, among other things. | 135 | * ENOSPC can hit here, among other things. |
| 121 | */ | 136 | */ |
| 122 | xfs_dentry_to_name(&teardown, dentry, 0); | 137 | xfs_dentry_to_name(&teardown, dentry); |
| 123 | 138 | ||
| 124 | xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); | 139 | xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); |
| 125 | } | 140 | } |
| @@ -154,8 +169,12 @@ xfs_generic_create( | |||
| 154 | if (error) | 169 | if (error) |
| 155 | return error; | 170 | return error; |
| 156 | 171 | ||
| 172 | /* Verify mode is valid also for tmpfile case */ | ||
| 173 | error = xfs_dentry_mode_to_name(&name, dentry, mode); | ||
| 174 | if (unlikely(error)) | ||
| 175 | goto out_free_acl; | ||
| 176 | |||
| 157 | if (!tmpfile) { | 177 | if (!tmpfile) { |
| 158 | xfs_dentry_to_name(&name, dentry, mode); | ||
| 159 | error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); | 178 | error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); |
| 160 | } else { | 179 | } else { |
| 161 | error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); | 180 | error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); |
| @@ -248,7 +267,7 @@ xfs_vn_lookup( | |||
| 248 | if (dentry->d_name.len >= MAXNAMELEN) | 267 | if (dentry->d_name.len >= MAXNAMELEN) |
| 249 | return ERR_PTR(-ENAMETOOLONG); | 268 | return ERR_PTR(-ENAMETOOLONG); |
| 250 | 269 | ||
| 251 | xfs_dentry_to_name(&name, dentry, 0); | 270 | xfs_dentry_to_name(&name, dentry); |
| 252 | error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); | 271 | error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); |
| 253 | if (unlikely(error)) { | 272 | if (unlikely(error)) { |
| 254 | if (unlikely(error != -ENOENT)) | 273 | if (unlikely(error != -ENOENT)) |
| @@ -275,7 +294,7 @@ xfs_vn_ci_lookup( | |||
| 275 | if (dentry->d_name.len >= MAXNAMELEN) | 294 | if (dentry->d_name.len >= MAXNAMELEN) |
| 276 | return ERR_PTR(-ENAMETOOLONG); | 295 | return ERR_PTR(-ENAMETOOLONG); |
| 277 | 296 | ||
| 278 | xfs_dentry_to_name(&xname, dentry, 0); | 297 | xfs_dentry_to_name(&xname, dentry); |
| 279 | error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); | 298 | error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); |
| 280 | if (unlikely(error)) { | 299 | if (unlikely(error)) { |
| 281 | if (unlikely(error != -ENOENT)) | 300 | if (unlikely(error != -ENOENT)) |
| @@ -310,7 +329,9 @@ xfs_vn_link( | |||
| 310 | struct xfs_name name; | 329 | struct xfs_name name; |
| 311 | int error; | 330 | int error; |
| 312 | 331 | ||
| 313 | xfs_dentry_to_name(&name, dentry, inode->i_mode); | 332 | error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode); |
| 333 | if (unlikely(error)) | ||
| 334 | return error; | ||
| 314 | 335 | ||
| 315 | error = xfs_link(XFS_I(dir), XFS_I(inode), &name); | 336 | error = xfs_link(XFS_I(dir), XFS_I(inode), &name); |
| 316 | if (unlikely(error)) | 337 | if (unlikely(error)) |
| @@ -329,7 +350,7 @@ xfs_vn_unlink( | |||
| 329 | struct xfs_name name; | 350 | struct xfs_name name; |
| 330 | int error; | 351 | int error; |
| 331 | 352 | ||
| 332 | xfs_dentry_to_name(&name, dentry, 0); | 353 | xfs_dentry_to_name(&name, dentry); |
| 333 | 354 | ||
| 334 | error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); | 355 | error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); |
| 335 | if (error) | 356 | if (error) |
| @@ -359,7 +380,9 @@ xfs_vn_symlink( | |||
| 359 | 380 | ||
| 360 | mode = S_IFLNK | | 381 | mode = S_IFLNK | |
| 361 | (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); | 382 | (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); |
| 362 | xfs_dentry_to_name(&name, dentry, mode); | 383 | error = xfs_dentry_mode_to_name(&name, dentry, mode); |
| 384 | if (unlikely(error)) | ||
| 385 | goto out; | ||
| 363 | 386 | ||
| 364 | error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); | 387 | error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); |
| 365 | if (unlikely(error)) | 388 | if (unlikely(error)) |
| @@ -395,6 +418,7 @@ xfs_vn_rename( | |||
| 395 | { | 418 | { |
| 396 | struct inode *new_inode = d_inode(ndentry); | 419 | struct inode *new_inode = d_inode(ndentry); |
| 397 | int omode = 0; | 420 | int omode = 0; |
| 421 | int error; | ||
| 398 | struct xfs_name oname; | 422 | struct xfs_name oname; |
| 399 | struct xfs_name nname; | 423 | struct xfs_name nname; |
| 400 | 424 | ||
| @@ -405,8 +429,14 @@ xfs_vn_rename( | |||
| 405 | if (flags & RENAME_EXCHANGE) | 429 | if (flags & RENAME_EXCHANGE) |
| 406 | omode = d_inode(ndentry)->i_mode; | 430 | omode = d_inode(ndentry)->i_mode; |
| 407 | 431 | ||
| 408 | xfs_dentry_to_name(&oname, odentry, omode); | 432 | error = xfs_dentry_mode_to_name(&oname, odentry, omode); |
| 409 | xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode); | 433 | if (omode && unlikely(error)) |
| 434 | return error; | ||
| 435 | |||
| 436 | error = xfs_dentry_mode_to_name(&nname, ndentry, | ||
| 437 | d_inode(odentry)->i_mode); | ||
| 438 | if (unlikely(error)) | ||
| 439 | return error; | ||
| 410 | 440 | ||
| 411 | return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), | 441 | return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), |
| 412 | XFS_I(ndir), &nname, | 442 | XFS_I(ndir), &nname, |
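
The xfs_iops.c changes above split the old xfs_dentry_to_name() into a mode-less variant and xfs_dentry_mode_to_name(), which refuses modes that map to XFS_DIR3_FT_UNKNOWN instead of letting them through. A reduced, self-contained sketch of that validation pattern (generic ftype enum and POSIX mode bits; the real set of on-disk types is larger):

	#include <sys/stat.h>

	enum ftype { FT_UNKNOWN, FT_REG, FT_DIR, FT_SYMLINK };	/* reduced set */

	/* Map a VFS mode to an on-disk file type; unknown modes are
	 * flagged so the caller can return -EFSCORRUPTED rather than
	 * storing garbage in the directory entry. */
	static enum ftype mode_to_ftype(int mode)
	{
		switch (mode & S_IFMT) {
		case S_IFREG:	return FT_REG;
		case S_IFDIR:	return FT_DIR;
		case S_IFLNK:	return FT_SYMLINK;
		default:	return FT_UNKNOWN;
		}
	}
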
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index e467218c0098..7a989de224f4 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h | |||
| @@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) | |||
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | #define ASSERT_ALWAYS(expr) \ | 333 | #define ASSERT_ALWAYS(expr) \ |
| 334 | (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) | 334 | (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) |
| 335 | 335 | ||
| 336 | #ifdef DEBUG | 336 | #ifdef DEBUG |
| 337 | #define ASSERT(expr) \ | 337 | #define ASSERT(expr) \ |
| 338 | (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) | 338 | (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) |
| 339 | 339 | ||
| 340 | #ifndef STATIC | 340 | #ifndef STATIC |
| 341 | # define STATIC noinline | 341 | # define STATIC noinline |
| @@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) | |||
| 346 | #ifdef XFS_WARN | 346 | #ifdef XFS_WARN |
| 347 | 347 | ||
| 348 | #define ASSERT(expr) \ | 348 | #define ASSERT(expr) \ |
| 349 | (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__)) | 349 | (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__)) |
| 350 | 350 | ||
| 351 | #ifndef STATIC | 351 | #ifndef STATIC |
| 352 | # define STATIC static noinline | 352 | # define STATIC static noinline |
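
The xfs_linux.h hunk above flips the branch hint in the assertion macros: the asserted expression is expected to be true, so the hint must be likely(), not unlikely(), otherwise the compiler lays out the common (passing) path as the cold one. In generic C the corrected shape is roughly the following (an illustration using the GCC/Clang builtin, not the kernel macro):

	#include <stdlib.h>

	/* likely(x) conventionally expands to __builtin_expect(!!(x), 1),
	 * telling the compiler the assertion is expected to hold. */
	#define my_assert(expr) \
		(__builtin_expect(!!(expr), 1) ? (void)0 : abort())
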
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index c39ac14ff540..b1469f0a91a6 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
| @@ -3317,12 +3317,8 @@ xfs_log_force( | |||
| 3317 | xfs_mount_t *mp, | 3317 | xfs_mount_t *mp, |
| 3318 | uint flags) | 3318 | uint flags) |
| 3319 | { | 3319 | { |
| 3320 | int error; | ||
| 3321 | |||
| 3322 | trace_xfs_log_force(mp, 0, _RET_IP_); | 3320 | trace_xfs_log_force(mp, 0, _RET_IP_); |
| 3323 | error = _xfs_log_force(mp, flags, NULL); | 3321 | _xfs_log_force(mp, flags, NULL); |
| 3324 | if (error) | ||
| 3325 | xfs_warn(mp, "%s: error %d returned.", __func__, error); | ||
| 3326 | } | 3322 | } |
| 3327 | 3323 | ||
| 3328 | /* | 3324 | /* |
| @@ -3466,12 +3462,8 @@ xfs_log_force_lsn( | |||
| 3466 | xfs_lsn_t lsn, | 3462 | xfs_lsn_t lsn, |
| 3467 | uint flags) | 3463 | uint flags) |
| 3468 | { | 3464 | { |
| 3469 | int error; | ||
| 3470 | |||
| 3471 | trace_xfs_log_force(mp, lsn, _RET_IP_); | 3465 | trace_xfs_log_force(mp, lsn, _RET_IP_); |
| 3472 | error = _xfs_log_force_lsn(mp, lsn, flags, NULL); | 3466 | _xfs_log_force_lsn(mp, lsn, flags, NULL); |
| 3473 | if (error) | ||
| 3474 | xfs_warn(mp, "%s: error %d returned.", __func__, error); | ||
| 3475 | } | 3467 | } |
| 3476 | 3468 | ||
| 3477 | /* | 3469 | /* |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 84f785218907..7f351f706b7a 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
| @@ -140,6 +140,7 @@ typedef struct xfs_mount { | |||
| 140 | int m_fixedfsid[2]; /* unchanged for life of FS */ | 140 | int m_fixedfsid[2]; /* unchanged for life of FS */ |
| 141 | uint m_dmevmask; /* DMI events for this FS */ | 141 | uint m_dmevmask; /* DMI events for this FS */ |
| 142 | __uint64_t m_flags; /* global mount flags */ | 142 | __uint64_t m_flags; /* global mount flags */ |
| 143 | bool m_inotbt_nores; /* no per-AG finobt resv. */ | ||
| 143 | int m_ialloc_inos; /* inodes in inode allocation */ | 144 | int m_ialloc_inos; /* inodes in inode allocation */ |
| 144 | int m_ialloc_blks; /* blocks in inode allocation */ | 145 | int m_ialloc_blks; /* blocks in inode allocation */ |
| 145 | int m_ialloc_min_blks;/* min blocks in sparse inode | 146 | int m_ialloc_min_blks;/* min blocks in sparse inode |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 45e50ea90769..b669b123287b 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
| @@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust( | |||
| 1177 | * the case in all other instances. It's OK that we do this because | 1177 | * the case in all other instances. It's OK that we do this because |
| 1178 | * quotacheck is done only at mount time. | 1178 | * quotacheck is done only at mount time. |
| 1179 | */ | 1179 | */ |
| 1180 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); | 1180 | error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL, |
| 1181 | &ip); | ||
| 1181 | if (error) { | 1182 | if (error) { |
| 1182 | *res = BULKSTAT_RV_NOTHING; | 1183 | *res = BULKSTAT_RV_NOTHING; |
| 1183 | return error; | 1184 | return error; |
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index fe86a668a57e..6e4c7446c3d4 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c | |||
| @@ -526,13 +526,14 @@ xfs_cui_recover( | |||
| 526 | xfs_refcount_finish_one_cleanup(tp, rcur, error); | 526 | xfs_refcount_finish_one_cleanup(tp, rcur, error); |
| 527 | error = xfs_defer_finish(&tp, &dfops, NULL); | 527 | error = xfs_defer_finish(&tp, &dfops, NULL); |
| 528 | if (error) | 528 | if (error) |
| 529 | goto abort_error; | 529 | goto abort_defer; |
| 530 | set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); | 530 | set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); |
| 531 | error = xfs_trans_commit(tp); | 531 | error = xfs_trans_commit(tp); |
| 532 | return error; | 532 | return error; |
| 533 | 533 | ||
| 534 | abort_error: | 534 | abort_error: |
| 535 | xfs_refcount_finish_one_cleanup(tp, rcur, error); | 535 | xfs_refcount_finish_one_cleanup(tp, rcur, error); |
| 536 | abort_defer: | ||
| 536 | xfs_defer_cancel(&dfops); | 537 | xfs_defer_cancel(&dfops); |
| 537 | xfs_trans_cancel(tp); | 538 | xfs_trans_cancel(tp); |
| 538 | return error; | 539 | return error; |
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c index 276d3023d60f..de6195e38910 100644 --- a/fs/xfs/xfs_sysfs.c +++ b/fs/xfs/xfs_sysfs.c | |||
| @@ -396,7 +396,7 @@ max_retries_show( | |||
| 396 | int retries; | 396 | int retries; |
| 397 | struct xfs_error_cfg *cfg = to_error_cfg(kobject); | 397 | struct xfs_error_cfg *cfg = to_error_cfg(kobject); |
| 398 | 398 | ||
| 399 | if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER) | 399 | if (cfg->max_retries == XFS_ERR_RETRY_FOREVER) |
| 400 | retries = -1; | 400 | retries = -1; |
| 401 | else | 401 | else |
| 402 | retries = cfg->max_retries; | 402 | retries = cfg->max_retries; |
| @@ -422,7 +422,7 @@ max_retries_store( | |||
| 422 | return -EINVAL; | 422 | return -EINVAL; |
| 423 | 423 | ||
| 424 | if (val == -1) | 424 | if (val == -1) |
| 425 | cfg->retry_timeout = XFS_ERR_RETRY_FOREVER; | 425 | cfg->max_retries = XFS_ERR_RETRY_FOREVER; |
| 426 | else | 426 | else |
| 427 | cfg->max_retries = val; | 427 | cfg->max_retries = val; |
| 428 | return count; | 428 | return count; |
