diff options
author | Jeff Layton <jlayton@redhat.com> | 2011-01-11 07:24:21 -0500 |
---|---|---|
committer | Steve French <sfrench@us.ibm.com> | 2011-01-20 12:43:59 -0500 |
commit | 2b84a36c5529da136d28b268e75268892d09869c (patch) | |
tree | 7977fad1c4a8ae8926184c00a3e7ccd30b398e5e /fs/cifs/connect.c | |
parent | 74dd92a881b62014ca3c754db6868e1f142f2fb9 (diff) |
cifs: allow for different handling of received response
In order to incorporate async requests, we need to allow for a more
general way to do things on receive, rather than just waking up a
process.
Turn the task pointer in the mid_q_entry into a callback function and a
generic data pointer. When a response comes in, or the socket is
reconnected, cifsd can call the callback function in order to wake up
the process.
The default is to just wake up the current process, which should mean no
change in behavior for existing code.
Also, clean up the locking in cifs_reconnect. There doesn't seem to be
any need to hold both the srv_mutex and GlobalMid_Lock when walking the
list of mids.
Reviewed-by: Suresh Jayaraman <sjayaraman@suse.de>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'fs/cifs/connect.c')
-rw-r--r-- | fs/cifs/connect.c | 53 |
1 file changed, 25 insertions, 28 deletions
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5c7f8450dbe0..aa66de1db5f5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -152,6 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
152 | 152 | ||
153 | /* before reconnecting the tcp session, mark the smb session (uid) | 153 | /* before reconnecting the tcp session, mark the smb session (uid) |
154 | and the tid bad so they are not used until reconnected */ | 154 | and the tid bad so they are not used until reconnected */ |
155 | cFYI(1, "%s: marking sessions and tcons for reconnect", __func__); | ||
155 | spin_lock(&cifs_tcp_ses_lock); | 156 | spin_lock(&cifs_tcp_ses_lock); |
156 | list_for_each(tmp, &server->smb_ses_list) { | 157 | list_for_each(tmp, &server->smb_ses_list) { |
157 | ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); | 158 | ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); |
@@ -163,7 +164,9 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
163 | } | 164 | } |
164 | } | 165 | } |
165 | spin_unlock(&cifs_tcp_ses_lock); | 166 | spin_unlock(&cifs_tcp_ses_lock); |
167 | |||
166 | /* do not want to be sending data on a socket we are freeing */ | 168 | /* do not want to be sending data on a socket we are freeing */ |
169 | cFYI(1, "%s: tearing down socket", __func__); | ||
167 | mutex_lock(&server->srv_mutex); | 170 | mutex_lock(&server->srv_mutex); |
168 | if (server->ssocket) { | 171 | if (server->ssocket) { |
169 | cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state, | 172 | cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state, |
@@ -180,22 +183,19 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
180 | kfree(server->session_key.response); | 183 | kfree(server->session_key.response); |
181 | server->session_key.response = NULL; | 184 | server->session_key.response = NULL; |
182 | server->session_key.len = 0; | 185 | server->session_key.len = 0; |
186 | mutex_unlock(&server->srv_mutex); | ||
183 | 187 | ||
188 | /* mark submitted MIDs for retry and issue callback */ | ||
189 | cFYI(1, "%s: issuing mid callbacks", __func__); | ||
184 | spin_lock(&GlobalMid_Lock); | 190 | spin_lock(&GlobalMid_Lock); |
185 | list_for_each(tmp, &server->pending_mid_q) { | 191 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
186 | mid_entry = list_entry(tmp, struct | 192 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
187 | mid_q_entry, | 193 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) |
188 | qhead); | ||
189 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) { | ||
190 | /* Mark other intransit requests as needing | ||
191 | retry so we do not immediately mark the | ||
192 | session bad again (ie after we reconnect | ||
193 | below) as they timeout too */ | ||
194 | mid_entry->midState = MID_RETRY_NEEDED; | 194 | mid_entry->midState = MID_RETRY_NEEDED; |
195 | } | 195 | list_del_init(&mid_entry->qhead); |
196 | mid_entry->callback(mid_entry); | ||
196 | } | 197 | } |
197 | spin_unlock(&GlobalMid_Lock); | 198 | spin_unlock(&GlobalMid_Lock); |
198 | mutex_unlock(&server->srv_mutex); | ||
199 | 199 | ||
200 | while ((server->tcpStatus != CifsExiting) && | 200 | while ((server->tcpStatus != CifsExiting) && |
201 | (server->tcpStatus != CifsGood)) { | 201 | (server->tcpStatus != CifsGood)) { |
@@ -212,10 +212,9 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
212 | if (server->tcpStatus != CifsExiting) | 212 | if (server->tcpStatus != CifsExiting) |
213 | server->tcpStatus = CifsGood; | 213 | server->tcpStatus = CifsGood; |
214 | spin_unlock(&GlobalMid_Lock); | 214 | spin_unlock(&GlobalMid_Lock); |
215 | /* atomic_set(&server->inFlight,0);*/ | ||
216 | wake_up(&server->response_q); | ||
217 | } | 215 | } |
218 | } | 216 | } |
217 | |||
219 | return rc; | 218 | return rc; |
220 | } | 219 | } |
221 | 220 | ||
@@ -345,7 +344,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
345 | struct msghdr smb_msg; | 344 | struct msghdr smb_msg; |
346 | struct kvec iov; | 345 | struct kvec iov; |
347 | struct socket *csocket = server->ssocket; | 346 | struct socket *csocket = server->ssocket; |
348 | struct list_head *tmp; | 347 | struct list_head *tmp, *tmp2; |
349 | struct task_struct *task_to_wake = NULL; | 348 | struct task_struct *task_to_wake = NULL; |
350 | struct mid_q_entry *mid_entry; | 349 | struct mid_q_entry *mid_entry; |
351 | char temp; | 350 | char temp; |
@@ -558,10 +557,9 @@ incomplete_rcv: | |||
558 | continue; | 557 | continue; |
559 | } | 558 | } |
560 | 559 | ||
561 | 560 | mid_entry = NULL; | |
562 | task_to_wake = NULL; | ||
563 | spin_lock(&GlobalMid_Lock); | 561 | spin_lock(&GlobalMid_Lock); |
564 | list_for_each(tmp, &server->pending_mid_q) { | 562 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
565 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 563 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
566 | 564 | ||
567 | if ((mid_entry->mid == smb_buffer->Mid) && | 565 | if ((mid_entry->mid == smb_buffer->Mid) && |
@@ -602,8 +600,9 @@ incomplete_rcv: | |||
602 | mid_entry->resp_buf = smb_buffer; | 600 | mid_entry->resp_buf = smb_buffer; |
603 | mid_entry->largeBuf = isLargeBuf; | 601 | mid_entry->largeBuf = isLargeBuf; |
604 | multi_t2_fnd: | 602 | multi_t2_fnd: |
605 | task_to_wake = mid_entry->tsk; | ||
606 | mid_entry->midState = MID_RESPONSE_RECEIVED; | 603 | mid_entry->midState = MID_RESPONSE_RECEIVED; |
604 | list_del_init(&mid_entry->qhead); | ||
605 | mid_entry->callback(mid_entry); | ||
607 | #ifdef CONFIG_CIFS_STATS2 | 606 | #ifdef CONFIG_CIFS_STATS2 |
608 | mid_entry->when_received = jiffies; | 607 | mid_entry->when_received = jiffies; |
609 | #endif | 608 | #endif |
@@ -613,9 +612,11 @@ multi_t2_fnd: | |||
613 | server->lstrp = jiffies; | 612 | server->lstrp = jiffies; |
614 | break; | 613 | break; |
615 | } | 614 | } |
615 | mid_entry = NULL; | ||
616 | } | 616 | } |
617 | spin_unlock(&GlobalMid_Lock); | 617 | spin_unlock(&GlobalMid_Lock); |
618 | if (task_to_wake) { | 618 | |
619 | if (mid_entry != NULL) { | ||
619 | /* Was previous buf put in mpx struct for multi-rsp? */ | 620 | /* Was previous buf put in mpx struct for multi-rsp? */ |
620 | if (!isMultiRsp) { | 621 | if (!isMultiRsp) { |
621 | /* smb buffer will be freed by user thread */ | 622 | /* smb buffer will be freed by user thread */ |
@@ -624,7 +625,6 @@ multi_t2_fnd: | |||
624 | else | 625 | else |
625 | smallbuf = NULL; | 626 | smallbuf = NULL; |
626 | } | 627 | } |
627 | wake_up_process(task_to_wake); | ||
628 | } else if (!is_valid_oplock_break(smb_buffer, server) && | 628 | } else if (!is_valid_oplock_break(smb_buffer, server) && |
629 | !isMultiRsp) { | 629 | !isMultiRsp) { |
630 | cERROR(1, "No task to wake, unknown frame received! " | 630 | cERROR(1, "No task to wake, unknown frame received! " |
@@ -678,15 +678,12 @@ multi_t2_fnd: | |||
678 | 678 | ||
679 | if (!list_empty(&server->pending_mid_q)) { | 679 | if (!list_empty(&server->pending_mid_q)) { |
680 | spin_lock(&GlobalMid_Lock); | 680 | spin_lock(&GlobalMid_Lock); |
681 | list_for_each(tmp, &server->pending_mid_q) { | 681 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
682 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 682 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
683 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) { | 683 | cFYI(1, "Clearing Mid 0x%x - issuing callback", |
684 | cFYI(1, "Clearing Mid 0x%x - waking up ", | 684 | mid_entry->mid); |
685 | mid_entry->mid); | 685 | list_del_init(&mid_entry->qhead); |
686 | task_to_wake = mid_entry->tsk; | 686 | mid_entry->callback(mid_entry); |
687 | if (task_to_wake) | ||
688 | wake_up_process(task_to_wake); | ||
689 | } | ||
690 | } | 687 | } |
691 | spin_unlock(&GlobalMid_Lock); | 688 | spin_unlock(&GlobalMid_Lock); |
692 | /* 1/8th of sec is more than enough time for them to exit */ | 689 | /* 1/8th of sec is more than enough time for them to exit */ |