diff options
author | Pavel Shilovsky <pshilov@microsoft.com> | 2019-10-22 11:41:42 -0400 |
---|---|---|
committer | Steve French <stfrench@microsoft.com> | 2019-10-24 22:32:32 -0400 |
commit | abe57073d08c13b95a46ccf48cc9dc957d5c6fdb (patch) | |
tree | 6dfd76e9b6eec5488cdc09ff7a66dcf71d86f12c | |
parent | 783bf7b8b641167fb6f3f4f787f60ae62bad41b3 (diff) |
CIFS: Fix retry mid list corruption on reconnects
When the client hits reconnect it iterates over the mid
pending queue marking entries for retry and moving them
to a temporary list to issue callbacks later without holding
GlobalMid_Lock. At the same time there is no guarantee that
mids can't be removed from the temporary list or even
freed completely by another thread. This may cause temporary
list corruption:
[ 430.454897] list_del corruption. prev->next should be ffff98d3a8f316c0, but was 2e885cb266355469
[ 430.464668] ------------[ cut here ]------------
[ 430.466569] kernel BUG at lib/list_debug.c:51!
[ 430.468476] invalid opcode: 0000 [#1] SMP PTI
[ 430.470286] CPU: 0 PID: 13267 Comm: cifsd Kdump: loaded Not tainted 5.4.0-rc3+ #19
[ 430.473472] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 430.475872] RIP: 0010:__list_del_entry_valid.cold+0x31/0x55
...
[ 430.510426] Call Trace:
[ 430.511500] cifs_reconnect+0x25e/0x610 [cifs]
[ 430.513350] cifs_readv_from_socket+0x220/0x250 [cifs]
[ 430.515464] cifs_read_from_socket+0x4a/0x70 [cifs]
[ 430.517452] ? try_to_wake_up+0x212/0x650
[ 430.519122] ? cifs_small_buf_get+0x16/0x30 [cifs]
[ 430.521086] ? allocate_buffers+0x66/0x120 [cifs]
[ 430.523019] cifs_demultiplex_thread+0xdc/0xc30 [cifs]
[ 430.525116] kthread+0xfb/0x130
[ 430.526421] ? cifs_handle_standard+0x190/0x190 [cifs]
[ 430.528514] ? kthread_park+0x90/0x90
[ 430.530019] ret_from_fork+0x35/0x40
Fix this by obtaining extra references for mids being retried
and marking them as MID_DELETED which indicates that such a mid
has been dequeued from the pending list.
Also move mid cleanup logic from DeleteMidQEntry to
_cifs_mid_q_entry_release which is called when the last reference
to a particular mid is put. This allows us to avoid any use-after-free
of response buffers.
The patch needs to be backported to stable kernels. A stable tag
is not mentioned below because the patch doesn't apply cleanly
to any actively maintained stable kernel.
Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
Reviewed-and-tested-by: David Wysochanski <dwysocha@redhat.com>
Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
-rw-r--r-- | fs/cifs/connect.c | 10 | ||||
-rw-r--r-- | fs/cifs/transport.c | 42 |
2 files changed, 32 insertions, 20 deletions
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index bdea4b3e8005..ccaa8bad336f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -564,9 +564,11 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
564 | spin_lock(&GlobalMid_Lock); | 564 | spin_lock(&GlobalMid_Lock); |
565 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { | 565 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
566 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 566 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
567 | kref_get(&mid_entry->refcount); | ||
567 | if (mid_entry->mid_state == MID_REQUEST_SUBMITTED) | 568 | if (mid_entry->mid_state == MID_REQUEST_SUBMITTED) |
568 | mid_entry->mid_state = MID_RETRY_NEEDED; | 569 | mid_entry->mid_state = MID_RETRY_NEEDED; |
569 | list_move(&mid_entry->qhead, &retry_list); | 570 | list_move(&mid_entry->qhead, &retry_list); |
571 | mid_entry->mid_flags |= MID_DELETED; | ||
570 | } | 572 | } |
571 | spin_unlock(&GlobalMid_Lock); | 573 | spin_unlock(&GlobalMid_Lock); |
572 | mutex_unlock(&server->srv_mutex); | 574 | mutex_unlock(&server->srv_mutex); |
@@ -576,6 +578,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
576 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 578 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
577 | list_del_init(&mid_entry->qhead); | 579 | list_del_init(&mid_entry->qhead); |
578 | mid_entry->callback(mid_entry); | 580 | mid_entry->callback(mid_entry); |
581 | cifs_mid_q_entry_release(mid_entry); | ||
579 | } | 582 | } |
580 | 583 | ||
581 | if (cifs_rdma_enabled(server)) { | 584 | if (cifs_rdma_enabled(server)) { |
@@ -895,8 +898,10 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) | |||
895 | if (mid->mid_flags & MID_DELETED) | 898 | if (mid->mid_flags & MID_DELETED) |
896 | printk_once(KERN_WARNING | 899 | printk_once(KERN_WARNING |
897 | "trying to dequeue a deleted mid\n"); | 900 | "trying to dequeue a deleted mid\n"); |
898 | else | 901 | else { |
899 | list_del_init(&mid->qhead); | 902 | list_del_init(&mid->qhead); |
903 | mid->mid_flags |= MID_DELETED; | ||
904 | } | ||
900 | spin_unlock(&GlobalMid_Lock); | 905 | spin_unlock(&GlobalMid_Lock); |
901 | } | 906 | } |
902 | 907 | ||
@@ -966,8 +971,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) | |||
966 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { | 971 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
967 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 972 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
968 | cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid); | 973 | cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid); |
974 | kref_get(&mid_entry->refcount); | ||
969 | mid_entry->mid_state = MID_SHUTDOWN; | 975 | mid_entry->mid_state = MID_SHUTDOWN; |
970 | list_move(&mid_entry->qhead, &dispose_list); | 976 | list_move(&mid_entry->qhead, &dispose_list); |
977 | mid_entry->mid_flags |= MID_DELETED; | ||
971 | } | 978 | } |
972 | spin_unlock(&GlobalMid_Lock); | 979 | spin_unlock(&GlobalMid_Lock); |
973 | 980 | ||
@@ -977,6 +984,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) | |||
977 | cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid); | 984 | cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid); |
978 | list_del_init(&mid_entry->qhead); | 985 | list_del_init(&mid_entry->qhead); |
979 | mid_entry->callback(mid_entry); | 986 | mid_entry->callback(mid_entry); |
987 | cifs_mid_q_entry_release(mid_entry); | ||
980 | } | 988 | } |
981 | /* 1/8th of sec is more than enough time for them to exit */ | 989 | /* 1/8th of sec is more than enough time for them to exit */ |
982 | msleep(125); | 990 | msleep(125); |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 308ad0f495e1..ca3de62688d6 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -86,22 +86,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) | |||
86 | 86 | ||
87 | static void _cifs_mid_q_entry_release(struct kref *refcount) | 87 | static void _cifs_mid_q_entry_release(struct kref *refcount) |
88 | { | 88 | { |
89 | struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, | 89 | struct mid_q_entry *midEntry = |
90 | refcount); | 90 | container_of(refcount, struct mid_q_entry, refcount); |
91 | |||
92 | mempool_free(mid, cifs_mid_poolp); | ||
93 | } | ||
94 | |||
95 | void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) | ||
96 | { | ||
97 | spin_lock(&GlobalMid_Lock); | ||
98 | kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); | ||
99 | spin_unlock(&GlobalMid_Lock); | ||
100 | } | ||
101 | |||
102 | void | ||
103 | DeleteMidQEntry(struct mid_q_entry *midEntry) | ||
104 | { | ||
105 | #ifdef CONFIG_CIFS_STATS2 | 91 | #ifdef CONFIG_CIFS_STATS2 |
106 | __le16 command = midEntry->server->vals->lock_cmd; | 92 | __le16 command = midEntry->server->vals->lock_cmd; |
107 | __u16 smb_cmd = le16_to_cpu(midEntry->command); | 93 | __u16 smb_cmd = le16_to_cpu(midEntry->command); |
@@ -166,6 +152,19 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) | |||
166 | } | 152 | } |
167 | } | 153 | } |
168 | #endif | 154 | #endif |
155 | |||
156 | mempool_free(midEntry, cifs_mid_poolp); | ||
157 | } | ||
158 | |||
159 | void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) | ||
160 | { | ||
161 | spin_lock(&GlobalMid_Lock); | ||
162 | kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); | ||
163 | spin_unlock(&GlobalMid_Lock); | ||
164 | } | ||
165 | |||
166 | void DeleteMidQEntry(struct mid_q_entry *midEntry) | ||
167 | { | ||
169 | cifs_mid_q_entry_release(midEntry); | 168 | cifs_mid_q_entry_release(midEntry); |
170 | } | 169 | } |
171 | 170 | ||
@@ -173,8 +172,10 @@ void | |||
173 | cifs_delete_mid(struct mid_q_entry *mid) | 172 | cifs_delete_mid(struct mid_q_entry *mid) |
174 | { | 173 | { |
175 | spin_lock(&GlobalMid_Lock); | 174 | spin_lock(&GlobalMid_Lock); |
176 | list_del_init(&mid->qhead); | 175 | if (!(mid->mid_flags & MID_DELETED)) { |
177 | mid->mid_flags |= MID_DELETED; | 176 | list_del_init(&mid->qhead); |
177 | mid->mid_flags |= MID_DELETED; | ||
178 | } | ||
178 | spin_unlock(&GlobalMid_Lock); | 179 | spin_unlock(&GlobalMid_Lock); |
179 | 180 | ||
180 | DeleteMidQEntry(mid); | 181 | DeleteMidQEntry(mid); |
@@ -872,7 +873,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | |||
872 | rc = -EHOSTDOWN; | 873 | rc = -EHOSTDOWN; |
873 | break; | 874 | break; |
874 | default: | 875 | default: |
875 | list_del_init(&mid->qhead); | 876 | if (!(mid->mid_flags & MID_DELETED)) { |
877 | list_del_init(&mid->qhead); | ||
878 | mid->mid_flags |= MID_DELETED; | ||
879 | } | ||
876 | cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n", | 880 | cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n", |
877 | __func__, mid->mid, mid->mid_state); | 881 | __func__, mid->mid, mid->mid_state); |
878 | rc = -EIO; | 882 | rc = -EIO; |