diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-29 17:02:20 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-29 17:02:20 -0500 |
commit | 6787dc24b72b88404ae652c914014e51ddf1c4fa (patch) | |
tree | 8248932bcf58891233193b2662e4de804736b6a1 | |
parent | a4b7fd7d34de5765dece2dd08060d2e1f7be3b39 (diff) | |
parent | 36c7ce4a17f220398e12e588ea3484265df4c41c (diff) |
Merge tag '4.16-rc-SMB3' of git://git.samba.org/sfrench/cifs-2.6
Pull cifs updates from Steve French:
"Some fixes for stable, fixed SMB3 DFS support, SMB3 Direct (RDMA) and
various bug fixes and cleanup"
* tag '4.16-rc-SMB3' of git://git.samba.org/sfrench/cifs-2.6: (60 commits)
fs/cifs/cifsacl.c Fixes typo in a comment
update internal version number for cifs.ko
cifs: add .splice_write
CIFS: document tcon/ses/server refcount dance
move a few externs to smbdirect.h to eliminate warning
CIFS: zero sensitive data when freeing
Cleanup some minor endian issues in smb3 rdma
CIFS: dump IPC tcon in debug proc file
CIFS: use tcon_ipc instead of use_ipc parameter of SMB2_ioctl
CIFS: make IPC a regular tcon
cifs: remove redundant duplicated assignment of pointer 'node'
CIFS: SMBD: work around gcc -Wmaybe-uninitialized warning
cifs: Fix autonegotiate security settings mismatch
CIFS: SMBD: _smbd_get_connection() can be static
CIFS: SMBD: Disable signing on SMB direct transport
CIFS: SMBD: Add SMB Direct debug counters
CIFS: SMBD: Upper layer performs SMB read via RDMA write through memory registration
CIFS: SMBD: Read correct returned data length for RDMA write (SMB read) I/O
CIFS: SMBD: Upper layer performs SMB write via RDMA read through memory registration
CIFS: SMBD: Implement RDMA memory registration
...
-rw-r--r-- | fs/cifs/Kconfig | 8 | ||||
-rw-r--r-- | fs/cifs/Makefile | 2 | ||||
-rw-r--r-- | fs/cifs/cifs_debug.c | 199 | ||||
-rw-r--r-- | fs/cifs/cifsacl.c | 2 | ||||
-rw-r--r-- | fs/cifs/cifsencrypt.c | 3 | ||||
-rw-r--r-- | fs/cifs/cifsfs.c | 8 | ||||
-rw-r--r-- | fs/cifs/cifsfs.h | 2 | ||||
-rw-r--r-- | fs/cifs/cifsglob.h | 35 | ||||
-rw-r--r-- | fs/cifs/cifsproto.h | 4 | ||||
-rw-r--r-- | fs/cifs/cifssmb.c | 22 | ||||
-rw-r--r-- | fs/cifs/connect.c | 251 | ||||
-rw-r--r-- | fs/cifs/file.c | 43 | ||||
-rw-r--r-- | fs/cifs/inode.c | 2 | ||||
-rw-r--r-- | fs/cifs/misc.c | 14 | ||||
-rw-r--r-- | fs/cifs/smb1ops.c | 4 | ||||
-rw-r--r-- | fs/cifs/smb2file.c | 2 | ||||
-rw-r--r-- | fs/cifs/smb2misc.c | 2 | ||||
-rw-r--r-- | fs/cifs/smb2ops.c | 77 | ||||
-rw-r--r-- | fs/cifs/smb2pdu.c | 578 | ||||
-rw-r--r-- | fs/cifs/smb2pdu.h | 60 | ||||
-rw-r--r-- | fs/cifs/smb2proto.h | 3 | ||||
-rw-r--r-- | fs/cifs/smbdirect.c | 2610 | ||||
-rw-r--r-- | fs/cifs/smbdirect.h | 338 | ||||
-rw-r--r-- | fs/cifs/transport.c | 69 |
24 files changed, 3896 insertions, 442 deletions
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index d5b2e12b5d02..c71971c01c63 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -196,6 +196,14 @@ config CIFS_SMB311 | |||
196 | This dialect includes improved security negotiation features. | 196 | This dialect includes improved security negotiation features. |
197 | If unsure, say N | 197 | If unsure, say N |
198 | 198 | ||
199 | config CIFS_SMB_DIRECT | ||
200 | bool "SMB Direct support (Experimental)" | ||
201 | depends on CIFS=m && INFINIBAND || CIFS=y && INFINIBAND=y | ||
202 | help | ||
203 | Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. | ||
204 | SMB Direct allows transferring SMB packets over RDMA. If unsure, | ||
205 | say N. | ||
206 | |||
199 | config CIFS_FSCACHE | 207 | config CIFS_FSCACHE |
200 | bool "Provide CIFS client caching support" | 208 | bool "Provide CIFS client caching support" |
201 | depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y | 209 | depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y |
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 7134f182720b..7e4a1e2f0696 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile | |||
@@ -19,3 +19,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o | |||
19 | cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o | 19 | cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o |
20 | 20 | ||
21 | cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o | 21 | cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o |
22 | |||
23 | cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o | ||
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index cbb9534b89b4..c7a863219fa3 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -30,6 +30,9 @@ | |||
30 | #include "cifsproto.h" | 30 | #include "cifsproto.h" |
31 | #include "cifs_debug.h" | 31 | #include "cifs_debug.h" |
32 | #include "cifsfs.h" | 32 | #include "cifsfs.h" |
33 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
34 | #include "smbdirect.h" | ||
35 | #endif | ||
33 | 36 | ||
34 | void | 37 | void |
35 | cifs_dump_mem(char *label, void *data, int length) | 38 | cifs_dump_mem(char *label, void *data, int length) |
@@ -107,6 +110,32 @@ void cifs_dump_mids(struct TCP_Server_Info *server) | |||
107 | } | 110 | } |
108 | 111 | ||
109 | #ifdef CONFIG_PROC_FS | 112 | #ifdef CONFIG_PROC_FS |
113 | static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon) | ||
114 | { | ||
115 | __u32 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); | ||
116 | |||
117 | seq_printf(m, "%s Mounts: %d ", tcon->treeName, tcon->tc_count); | ||
118 | if (tcon->nativeFileSystem) | ||
119 | seq_printf(m, "Type: %s ", tcon->nativeFileSystem); | ||
120 | seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d", | ||
121 | le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), | ||
122 | le32_to_cpu(tcon->fsAttrInfo.Attributes), | ||
123 | le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), | ||
124 | tcon->tidStatus); | ||
125 | if (dev_type == FILE_DEVICE_DISK) | ||
126 | seq_puts(m, " type: DISK "); | ||
127 | else if (dev_type == FILE_DEVICE_CD_ROM) | ||
128 | seq_puts(m, " type: CDROM "); | ||
129 | else | ||
130 | seq_printf(m, " type: %d ", dev_type); | ||
131 | if (tcon->ses->server->ops->dump_share_caps) | ||
132 | tcon->ses->server->ops->dump_share_caps(m, tcon); | ||
133 | |||
134 | if (tcon->need_reconnect) | ||
135 | seq_puts(m, "\tDISCONNECTED "); | ||
136 | seq_putc(m, '\n'); | ||
137 | } | ||
138 | |||
110 | static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | 139 | static int cifs_debug_data_proc_show(struct seq_file *m, void *v) |
111 | { | 140 | { |
112 | struct list_head *tmp1, *tmp2, *tmp3; | 141 | struct list_head *tmp1, *tmp2, *tmp3; |
@@ -115,7 +144,6 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
115 | struct cifs_ses *ses; | 144 | struct cifs_ses *ses; |
116 | struct cifs_tcon *tcon; | 145 | struct cifs_tcon *tcon; |
117 | int i, j; | 146 | int i, j; |
118 | __u32 dev_type; | ||
119 | 147 | ||
120 | seq_puts(m, | 148 | seq_puts(m, |
121 | "Display Internal CIFS Data Structures for Debugging\n" | 149 | "Display Internal CIFS Data Structures for Debugging\n" |
@@ -152,6 +180,72 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
152 | list_for_each(tmp1, &cifs_tcp_ses_list) { | 180 | list_for_each(tmp1, &cifs_tcp_ses_list) { |
153 | server = list_entry(tmp1, struct TCP_Server_Info, | 181 | server = list_entry(tmp1, struct TCP_Server_Info, |
154 | tcp_ses_list); | 182 | tcp_ses_list); |
183 | |||
184 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
185 | if (!server->rdma) | ||
186 | goto skip_rdma; | ||
187 | |||
188 | seq_printf(m, "\nSMBDirect (in hex) protocol version: %x " | ||
189 | "transport status: %x", | ||
190 | server->smbd_conn->protocol, | ||
191 | server->smbd_conn->transport_status); | ||
192 | seq_printf(m, "\nConn receive_credit_max: %x " | ||
193 | "send_credit_target: %x max_send_size: %x", | ||
194 | server->smbd_conn->receive_credit_max, | ||
195 | server->smbd_conn->send_credit_target, | ||
196 | server->smbd_conn->max_send_size); | ||
197 | seq_printf(m, "\nConn max_fragmented_recv_size: %x " | ||
198 | "max_fragmented_send_size: %x max_receive_size:%x", | ||
199 | server->smbd_conn->max_fragmented_recv_size, | ||
200 | server->smbd_conn->max_fragmented_send_size, | ||
201 | server->smbd_conn->max_receive_size); | ||
202 | seq_printf(m, "\nConn keep_alive_interval: %x " | ||
203 | "max_readwrite_size: %x rdma_readwrite_threshold: %x", | ||
204 | server->smbd_conn->keep_alive_interval, | ||
205 | server->smbd_conn->max_readwrite_size, | ||
206 | server->smbd_conn->rdma_readwrite_threshold); | ||
207 | seq_printf(m, "\nDebug count_get_receive_buffer: %x " | ||
208 | "count_put_receive_buffer: %x count_send_empty: %x", | ||
209 | server->smbd_conn->count_get_receive_buffer, | ||
210 | server->smbd_conn->count_put_receive_buffer, | ||
211 | server->smbd_conn->count_send_empty); | ||
212 | seq_printf(m, "\nRead Queue count_reassembly_queue: %x " | ||
213 | "count_enqueue_reassembly_queue: %x " | ||
214 | "count_dequeue_reassembly_queue: %x " | ||
215 | "fragment_reassembly_remaining: %x " | ||
216 | "reassembly_data_length: %x " | ||
217 | "reassembly_queue_length: %x", | ||
218 | server->smbd_conn->count_reassembly_queue, | ||
219 | server->smbd_conn->count_enqueue_reassembly_queue, | ||
220 | server->smbd_conn->count_dequeue_reassembly_queue, | ||
221 | server->smbd_conn->fragment_reassembly_remaining, | ||
222 | server->smbd_conn->reassembly_data_length, | ||
223 | server->smbd_conn->reassembly_queue_length); | ||
224 | seq_printf(m, "\nCurrent Credits send_credits: %x " | ||
225 | "receive_credits: %x receive_credit_target: %x", | ||
226 | atomic_read(&server->smbd_conn->send_credits), | ||
227 | atomic_read(&server->smbd_conn->receive_credits), | ||
228 | server->smbd_conn->receive_credit_target); | ||
229 | seq_printf(m, "\nPending send_pending: %x send_payload_pending:" | ||
230 | " %x smbd_send_pending: %x smbd_recv_pending: %x", | ||
231 | atomic_read(&server->smbd_conn->send_pending), | ||
232 | atomic_read(&server->smbd_conn->send_payload_pending), | ||
233 | server->smbd_conn->smbd_send_pending, | ||
234 | server->smbd_conn->smbd_recv_pending); | ||
235 | seq_printf(m, "\nReceive buffers count_receive_queue: %x " | ||
236 | "count_empty_packet_queue: %x", | ||
237 | server->smbd_conn->count_receive_queue, | ||
238 | server->smbd_conn->count_empty_packet_queue); | ||
239 | seq_printf(m, "\nMR responder_resources: %x " | ||
240 | "max_frmr_depth: %x mr_type: %x", | ||
241 | server->smbd_conn->responder_resources, | ||
242 | server->smbd_conn->max_frmr_depth, | ||
243 | server->smbd_conn->mr_type); | ||
244 | seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x", | ||
245 | atomic_read(&server->smbd_conn->mr_ready_count), | ||
246 | atomic_read(&server->smbd_conn->mr_used_count)); | ||
247 | skip_rdma: | ||
248 | #endif | ||
155 | seq_printf(m, "\nNumber of credits: %d", server->credits); | 249 | seq_printf(m, "\nNumber of credits: %d", server->credits); |
156 | i++; | 250 | i++; |
157 | list_for_each(tmp2, &server->smb_ses_list) { | 251 | list_for_each(tmp2, &server->smb_ses_list) { |
@@ -176,6 +270,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
176 | ses->ses_count, ses->serverOS, ses->serverNOS, | 270 | ses->ses_count, ses->serverOS, ses->serverNOS, |
177 | ses->capabilities, ses->status); | 271 | ses->capabilities, ses->status); |
178 | } | 272 | } |
273 | if (server->rdma) | ||
274 | seq_printf(m, "RDMA\n\t"); | ||
179 | seq_printf(m, "TCP status: %d\n\tLocal Users To " | 275 | seq_printf(m, "TCP status: %d\n\tLocal Users To " |
180 | "Server: %d SecMode: 0x%x Req On Wire: %d", | 276 | "Server: %d SecMode: 0x%x Req On Wire: %d", |
181 | server->tcpStatus, server->srv_count, | 277 | server->tcpStatus, server->srv_count, |
@@ -189,35 +285,19 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
189 | 285 | ||
190 | seq_puts(m, "\n\tShares:"); | 286 | seq_puts(m, "\n\tShares:"); |
191 | j = 0; | 287 | j = 0; |
288 | |||
289 | seq_printf(m, "\n\t%d) IPC: ", j); | ||
290 | if (ses->tcon_ipc) | ||
291 | cifs_debug_tcon(m, ses->tcon_ipc); | ||
292 | else | ||
293 | seq_puts(m, "none\n"); | ||
294 | |||
192 | list_for_each(tmp3, &ses->tcon_list) { | 295 | list_for_each(tmp3, &ses->tcon_list) { |
193 | tcon = list_entry(tmp3, struct cifs_tcon, | 296 | tcon = list_entry(tmp3, struct cifs_tcon, |
194 | tcon_list); | 297 | tcon_list); |
195 | ++j; | 298 | ++j; |
196 | dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); | 299 | seq_printf(m, "\n\t%d) ", j); |
197 | seq_printf(m, "\n\t%d) %s Mounts: %d ", j, | 300 | cifs_debug_tcon(m, tcon); |
198 | tcon->treeName, tcon->tc_count); | ||
199 | if (tcon->nativeFileSystem) { | ||
200 | seq_printf(m, "Type: %s ", | ||
201 | tcon->nativeFileSystem); | ||
202 | } | ||
203 | seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x" | ||
204 | "\n\tPathComponentMax: %d Status: %d", | ||
205 | le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), | ||
206 | le32_to_cpu(tcon->fsAttrInfo.Attributes), | ||
207 | le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), | ||
208 | tcon->tidStatus); | ||
209 | if (dev_type == FILE_DEVICE_DISK) | ||
210 | seq_puts(m, " type: DISK "); | ||
211 | else if (dev_type == FILE_DEVICE_CD_ROM) | ||
212 | seq_puts(m, " type: CDROM "); | ||
213 | else | ||
214 | seq_printf(m, " type: %d ", dev_type); | ||
215 | if (server->ops->dump_share_caps) | ||
216 | server->ops->dump_share_caps(m, tcon); | ||
217 | |||
218 | if (tcon->need_reconnect) | ||
219 | seq_puts(m, "\tDISCONNECTED "); | ||
220 | seq_putc(m, '\n'); | ||
221 | } | 301 | } |
222 | 302 | ||
223 | seq_puts(m, "\n\tMIDs:\n"); | 303 | seq_puts(m, "\n\tMIDs:\n"); |
@@ -374,6 +454,45 @@ static const struct file_operations cifs_stats_proc_fops = { | |||
374 | }; | 454 | }; |
375 | #endif /* STATS */ | 455 | #endif /* STATS */ |
376 | 456 | ||
457 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
458 | #define PROC_FILE_DEFINE(name) \ | ||
459 | static ssize_t name##_write(struct file *file, const char __user *buffer, \ | ||
460 | size_t count, loff_t *ppos) \ | ||
461 | { \ | ||
462 | int rc; \ | ||
463 | rc = kstrtoint_from_user(buffer, count, 10, & name); \ | ||
464 | if (rc) \ | ||
465 | return rc; \ | ||
466 | return count; \ | ||
467 | } \ | ||
468 | static int name##_proc_show(struct seq_file *m, void *v) \ | ||
469 | { \ | ||
470 | seq_printf(m, "%d\n", name ); \ | ||
471 | return 0; \ | ||
472 | } \ | ||
473 | static int name##_open(struct inode *inode, struct file *file) \ | ||
474 | { \ | ||
475 | return single_open(file, name##_proc_show, NULL); \ | ||
476 | } \ | ||
477 | \ | ||
478 | static const struct file_operations cifs_##name##_proc_fops = { \ | ||
479 | .open = name##_open, \ | ||
480 | .read = seq_read, \ | ||
481 | .llseek = seq_lseek, \ | ||
482 | .release = single_release, \ | ||
483 | .write = name##_write, \ | ||
484 | } | ||
485 | |||
486 | PROC_FILE_DEFINE(rdma_readwrite_threshold); | ||
487 | PROC_FILE_DEFINE(smbd_max_frmr_depth); | ||
488 | PROC_FILE_DEFINE(smbd_keep_alive_interval); | ||
489 | PROC_FILE_DEFINE(smbd_max_receive_size); | ||
490 | PROC_FILE_DEFINE(smbd_max_fragmented_recv_size); | ||
491 | PROC_FILE_DEFINE(smbd_max_send_size); | ||
492 | PROC_FILE_DEFINE(smbd_send_credit_target); | ||
493 | PROC_FILE_DEFINE(smbd_receive_credit_max); | ||
494 | #endif | ||
495 | |||
377 | static struct proc_dir_entry *proc_fs_cifs; | 496 | static struct proc_dir_entry *proc_fs_cifs; |
378 | static const struct file_operations cifsFYI_proc_fops; | 497 | static const struct file_operations cifsFYI_proc_fops; |
379 | static const struct file_operations cifs_lookup_cache_proc_fops; | 498 | static const struct file_operations cifs_lookup_cache_proc_fops; |
@@ -401,6 +520,24 @@ cifs_proc_init(void) | |||
401 | &cifs_security_flags_proc_fops); | 520 | &cifs_security_flags_proc_fops); |
402 | proc_create("LookupCacheEnabled", 0, proc_fs_cifs, | 521 | proc_create("LookupCacheEnabled", 0, proc_fs_cifs, |
403 | &cifs_lookup_cache_proc_fops); | 522 | &cifs_lookup_cache_proc_fops); |
523 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
524 | proc_create("rdma_readwrite_threshold", 0, proc_fs_cifs, | ||
525 | &cifs_rdma_readwrite_threshold_proc_fops); | ||
526 | proc_create("smbd_max_frmr_depth", 0, proc_fs_cifs, | ||
527 | &cifs_smbd_max_frmr_depth_proc_fops); | ||
528 | proc_create("smbd_keep_alive_interval", 0, proc_fs_cifs, | ||
529 | &cifs_smbd_keep_alive_interval_proc_fops); | ||
530 | proc_create("smbd_max_receive_size", 0, proc_fs_cifs, | ||
531 | &cifs_smbd_max_receive_size_proc_fops); | ||
532 | proc_create("smbd_max_fragmented_recv_size", 0, proc_fs_cifs, | ||
533 | &cifs_smbd_max_fragmented_recv_size_proc_fops); | ||
534 | proc_create("smbd_max_send_size", 0, proc_fs_cifs, | ||
535 | &cifs_smbd_max_send_size_proc_fops); | ||
536 | proc_create("smbd_send_credit_target", 0, proc_fs_cifs, | ||
537 | &cifs_smbd_send_credit_target_proc_fops); | ||
538 | proc_create("smbd_receive_credit_max", 0, proc_fs_cifs, | ||
539 | &cifs_smbd_receive_credit_max_proc_fops); | ||
540 | #endif | ||
404 | } | 541 | } |
405 | 542 | ||
406 | void | 543 | void |
@@ -418,6 +555,16 @@ cifs_proc_clean(void) | |||
418 | remove_proc_entry("SecurityFlags", proc_fs_cifs); | 555 | remove_proc_entry("SecurityFlags", proc_fs_cifs); |
419 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); | 556 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); |
420 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); | 557 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); |
558 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
559 | remove_proc_entry("rdma_readwrite_threshold", proc_fs_cifs); | ||
560 | remove_proc_entry("smbd_max_frmr_depth", proc_fs_cifs); | ||
561 | remove_proc_entry("smbd_keep_alive_interval", proc_fs_cifs); | ||
562 | remove_proc_entry("smbd_max_receive_size", proc_fs_cifs); | ||
563 | remove_proc_entry("smbd_max_fragmented_recv_size", proc_fs_cifs); | ||
564 | remove_proc_entry("smbd_max_send_size", proc_fs_cifs); | ||
565 | remove_proc_entry("smbd_send_credit_target", proc_fs_cifs); | ||
566 | remove_proc_entry("smbd_receive_credit_max", proc_fs_cifs); | ||
567 | #endif | ||
421 | remove_proc_entry("fs/cifs", NULL); | 568 | remove_proc_entry("fs/cifs", NULL); |
422 | } | 569 | } |
423 | 570 | ||
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index b98436f5c7c7..13a8a77322c9 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -1125,7 +1125,7 @@ out: | |||
1125 | return rc; | 1125 | return rc; |
1126 | } | 1126 | } |
1127 | 1127 | ||
1128 | /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ | 1128 | /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ |
1129 | int | 1129 | int |
1130 | cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, | 1130 | cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, |
1131 | struct inode *inode, const char *path, | 1131 | struct inode *inode, const char *path, |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 68abbb0db608..f2b0a7f124da 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, | |||
325 | { | 325 | { |
326 | int i; | 326 | int i; |
327 | int rc; | 327 | int rc; |
328 | char password_with_pad[CIFS_ENCPWD_SIZE]; | 328 | char password_with_pad[CIFS_ENCPWD_SIZE] = {0}; |
329 | 329 | ||
330 | memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); | ||
331 | if (password) | 330 | if (password) |
332 | strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); | 331 | strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); |
333 | 332 | ||
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 31b7565b1617..a7be591d8e18 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -327,6 +327,8 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) | |||
327 | default: | 327 | default: |
328 | seq_puts(s, "(unknown)"); | 328 | seq_puts(s, "(unknown)"); |
329 | } | 329 | } |
330 | if (server->rdma) | ||
331 | seq_puts(s, ",rdma"); | ||
330 | } | 332 | } |
331 | 333 | ||
332 | static void | 334 | static void |
@@ -1068,6 +1070,7 @@ const struct file_operations cifs_file_ops = { | |||
1068 | .flush = cifs_flush, | 1070 | .flush = cifs_flush, |
1069 | .mmap = cifs_file_mmap, | 1071 | .mmap = cifs_file_mmap, |
1070 | .splice_read = generic_file_splice_read, | 1072 | .splice_read = generic_file_splice_read, |
1073 | .splice_write = iter_file_splice_write, | ||
1071 | .llseek = cifs_llseek, | 1074 | .llseek = cifs_llseek, |
1072 | .unlocked_ioctl = cifs_ioctl, | 1075 | .unlocked_ioctl = cifs_ioctl, |
1073 | .copy_file_range = cifs_copy_file_range, | 1076 | .copy_file_range = cifs_copy_file_range, |
@@ -1086,6 +1089,7 @@ const struct file_operations cifs_file_strict_ops = { | |||
1086 | .flush = cifs_flush, | 1089 | .flush = cifs_flush, |
1087 | .mmap = cifs_file_strict_mmap, | 1090 | .mmap = cifs_file_strict_mmap, |
1088 | .splice_read = generic_file_splice_read, | 1091 | .splice_read = generic_file_splice_read, |
1092 | .splice_write = iter_file_splice_write, | ||
1089 | .llseek = cifs_llseek, | 1093 | .llseek = cifs_llseek, |
1090 | .unlocked_ioctl = cifs_ioctl, | 1094 | .unlocked_ioctl = cifs_ioctl, |
1091 | .copy_file_range = cifs_copy_file_range, | 1095 | .copy_file_range = cifs_copy_file_range, |
@@ -1105,6 +1109,7 @@ const struct file_operations cifs_file_direct_ops = { | |||
1105 | .flush = cifs_flush, | 1109 | .flush = cifs_flush, |
1106 | .mmap = cifs_file_mmap, | 1110 | .mmap = cifs_file_mmap, |
1107 | .splice_read = generic_file_splice_read, | 1111 | .splice_read = generic_file_splice_read, |
1112 | .splice_write = iter_file_splice_write, | ||
1108 | .unlocked_ioctl = cifs_ioctl, | 1113 | .unlocked_ioctl = cifs_ioctl, |
1109 | .copy_file_range = cifs_copy_file_range, | 1114 | .copy_file_range = cifs_copy_file_range, |
1110 | .clone_file_range = cifs_clone_file_range, | 1115 | .clone_file_range = cifs_clone_file_range, |
@@ -1122,6 +1127,7 @@ const struct file_operations cifs_file_nobrl_ops = { | |||
1122 | .flush = cifs_flush, | 1127 | .flush = cifs_flush, |
1123 | .mmap = cifs_file_mmap, | 1128 | .mmap = cifs_file_mmap, |
1124 | .splice_read = generic_file_splice_read, | 1129 | .splice_read = generic_file_splice_read, |
1130 | .splice_write = iter_file_splice_write, | ||
1125 | .llseek = cifs_llseek, | 1131 | .llseek = cifs_llseek, |
1126 | .unlocked_ioctl = cifs_ioctl, | 1132 | .unlocked_ioctl = cifs_ioctl, |
1127 | .copy_file_range = cifs_copy_file_range, | 1133 | .copy_file_range = cifs_copy_file_range, |
@@ -1139,6 +1145,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = { | |||
1139 | .flush = cifs_flush, | 1145 | .flush = cifs_flush, |
1140 | .mmap = cifs_file_strict_mmap, | 1146 | .mmap = cifs_file_strict_mmap, |
1141 | .splice_read = generic_file_splice_read, | 1147 | .splice_read = generic_file_splice_read, |
1148 | .splice_write = iter_file_splice_write, | ||
1142 | .llseek = cifs_llseek, | 1149 | .llseek = cifs_llseek, |
1143 | .unlocked_ioctl = cifs_ioctl, | 1150 | .unlocked_ioctl = cifs_ioctl, |
1144 | .copy_file_range = cifs_copy_file_range, | 1151 | .copy_file_range = cifs_copy_file_range, |
@@ -1157,6 +1164,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { | |||
1157 | .flush = cifs_flush, | 1164 | .flush = cifs_flush, |
1158 | .mmap = cifs_file_mmap, | 1165 | .mmap = cifs_file_mmap, |
1159 | .splice_read = generic_file_splice_read, | 1166 | .splice_read = generic_file_splice_read, |
1167 | .splice_write = iter_file_splice_write, | ||
1160 | .unlocked_ioctl = cifs_ioctl, | 1168 | .unlocked_ioctl = cifs_ioctl, |
1161 | .copy_file_range = cifs_copy_file_range, | 1169 | .copy_file_range = cifs_copy_file_range, |
1162 | .clone_file_range = cifs_clone_file_range, | 1170 | .clone_file_range = cifs_clone_file_range, |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 5a10e566f0e6..013ba2aed8d9 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -149,5 +149,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
149 | extern const struct export_operations cifs_export_ops; | 149 | extern const struct export_operations cifs_export_ops; |
150 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ | 150 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ |
151 | 151 | ||
152 | #define CIFS_VERSION "2.10" | 152 | #define CIFS_VERSION "2.11" |
153 | #endif /* _CIFSFS_H */ | 153 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index b16583594d1a..48f7c197cd2d 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -64,8 +64,8 @@ | |||
64 | #define RFC1001_NAME_LEN 15 | 64 | #define RFC1001_NAME_LEN 15 |
65 | #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1) | 65 | #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1) |
66 | 66 | ||
67 | /* currently length of NIP6_FMT */ | 67 | /* maximum length of ip addr as a string (including ipv6 and sctp) */ |
68 | #define SERVER_NAME_LENGTH 40 | 68 | #define SERVER_NAME_LENGTH 80 |
69 | #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) | 69 | #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) |
70 | 70 | ||
71 | /* echo interval in seconds */ | 71 | /* echo interval in seconds */ |
@@ -230,8 +230,14 @@ struct smb_version_operations { | |||
230 | __u64 (*get_next_mid)(struct TCP_Server_Info *); | 230 | __u64 (*get_next_mid)(struct TCP_Server_Info *); |
231 | /* data offset from read response message */ | 231 | /* data offset from read response message */ |
232 | unsigned int (*read_data_offset)(char *); | 232 | unsigned int (*read_data_offset)(char *); |
233 | /* data length from read response message */ | 233 | /* |
234 | unsigned int (*read_data_length)(char *); | 234 | * Data length from read response message |
235 | * When in_remaining is true, the returned data length is in | ||
236 | * message field DataRemaining for out-of-band data read (e.g through | ||
237 | * Memory Registration RDMA write in SMBD). | ||
238 | * Otherwise, the returned data length is in message field DataLength. | ||
239 | */ | ||
240 | unsigned int (*read_data_length)(char *, bool in_remaining); | ||
235 | /* map smb to linux error */ | 241 | /* map smb to linux error */ |
236 | int (*map_error)(char *, bool); | 242 | int (*map_error)(char *, bool); |
237 | /* find mid corresponding to the response message */ | 243 | /* find mid corresponding to the response message */ |
@@ -532,6 +538,7 @@ struct smb_vol { | |||
532 | bool nopersistent:1; | 538 | bool nopersistent:1; |
533 | bool resilient:1; /* noresilient not required since not fored for CA */ | 539 | bool resilient:1; /* noresilient not required since not fored for CA */ |
534 | bool domainauto:1; | 540 | bool domainauto:1; |
541 | bool rdma:1; | ||
535 | unsigned int rsize; | 542 | unsigned int rsize; |
536 | unsigned int wsize; | 543 | unsigned int wsize; |
537 | bool sockopt_tcp_nodelay:1; | 544 | bool sockopt_tcp_nodelay:1; |
@@ -648,6 +655,10 @@ struct TCP_Server_Info { | |||
648 | bool sec_kerberos; /* supports plain Kerberos */ | 655 | bool sec_kerberos; /* supports plain Kerberos */ |
649 | bool sec_mskerberos; /* supports legacy MS Kerberos */ | 656 | bool sec_mskerberos; /* supports legacy MS Kerberos */ |
650 | bool large_buf; /* is current buffer large? */ | 657 | bool large_buf; /* is current buffer large? */ |
658 | /* use SMBD connection instead of socket */ | ||
659 | bool rdma; | ||
660 | /* point to the SMBD connection if RDMA is used instead of socket */ | ||
661 | struct smbd_connection *smbd_conn; | ||
651 | struct delayed_work echo; /* echo ping workqueue job */ | 662 | struct delayed_work echo; /* echo ping workqueue job */ |
652 | char *smallbuf; /* pointer to current "small" buffer */ | 663 | char *smallbuf; /* pointer to current "small" buffer */ |
653 | char *bigbuf; /* pointer to current "big" buffer */ | 664 | char *bigbuf; /* pointer to current "big" buffer */ |
@@ -822,12 +833,12 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) | |||
822 | struct cifs_ses { | 833 | struct cifs_ses { |
823 | struct list_head smb_ses_list; | 834 | struct list_head smb_ses_list; |
824 | struct list_head tcon_list; | 835 | struct list_head tcon_list; |
836 | struct cifs_tcon *tcon_ipc; | ||
825 | struct mutex session_mutex; | 837 | struct mutex session_mutex; |
826 | struct TCP_Server_Info *server; /* pointer to server info */ | 838 | struct TCP_Server_Info *server; /* pointer to server info */ |
827 | int ses_count; /* reference counter */ | 839 | int ses_count; /* reference counter */ |
828 | enum statusEnum status; | 840 | enum statusEnum status; |
829 | unsigned overrideSecFlg; /* if non-zero override global sec flags */ | 841 | unsigned overrideSecFlg; /* if non-zero override global sec flags */ |
830 | __u32 ipc_tid; /* special tid for connection to IPC share */ | ||
831 | char *serverOS; /* name of operating system underlying server */ | 842 | char *serverOS; /* name of operating system underlying server */ |
832 | char *serverNOS; /* name of network operating system of server */ | 843 | char *serverNOS; /* name of network operating system of server */ |
833 | char *serverDomain; /* security realm of server */ | 844 | char *serverDomain; /* security realm of server */ |
@@ -835,8 +846,7 @@ struct cifs_ses { | |||
835 | kuid_t linux_uid; /* overriding owner of files on the mount */ | 846 | kuid_t linux_uid; /* overriding owner of files on the mount */ |
836 | kuid_t cred_uid; /* owner of credentials */ | 847 | kuid_t cred_uid; /* owner of credentials */ |
837 | unsigned int capabilities; | 848 | unsigned int capabilities; |
838 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for | 849 | char serverName[SERVER_NAME_LEN_WITH_NULL]; |
839 | TCP names - will ipv6 and sctp addresses fit? */ | ||
840 | char *user_name; /* must not be null except during init of sess | 850 | char *user_name; /* must not be null except during init of sess |
841 | and after mount option parsing we fill it */ | 851 | and after mount option parsing we fill it */ |
842 | char *domainName; | 852 | char *domainName; |
@@ -931,7 +941,9 @@ struct cifs_tcon { | |||
931 | FILE_SYSTEM_DEVICE_INFO fsDevInfo; | 941 | FILE_SYSTEM_DEVICE_INFO fsDevInfo; |
932 | FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ | 942 | FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ |
933 | FILE_SYSTEM_UNIX_INFO fsUnixInfo; | 943 | FILE_SYSTEM_UNIX_INFO fsUnixInfo; |
934 | bool ipc:1; /* set if connection to IPC$ eg for RPC/PIPES */ | 944 | bool ipc:1; /* set if connection to IPC$ share (always also pipe) */ |
945 | bool pipe:1; /* set if connection to pipe share */ | ||
946 | bool print:1; /* set if connection to printer share */ | ||
935 | bool retry:1; | 947 | bool retry:1; |
936 | bool nocase:1; | 948 | bool nocase:1; |
937 | bool seal:1; /* transport encryption for this mounted share */ | 949 | bool seal:1; /* transport encryption for this mounted share */ |
@@ -944,7 +956,6 @@ struct cifs_tcon { | |||
944 | bool need_reopen_files:1; /* need to reopen tcon file handles */ | 956 | bool need_reopen_files:1; /* need to reopen tcon file handles */ |
945 | bool use_resilient:1; /* use resilient instead of durable handles */ | 957 | bool use_resilient:1; /* use resilient instead of durable handles */ |
946 | bool use_persistent:1; /* use persistent instead of durable handles */ | 958 | bool use_persistent:1; /* use persistent instead of durable handles */ |
947 | bool print:1; /* set if connection to printer share */ | ||
948 | __le32 capabilities; | 959 | __le32 capabilities; |
949 | __u32 share_flags; | 960 | __u32 share_flags; |
950 | __u32 maximal_access; | 961 | __u32 maximal_access; |
@@ -1147,6 +1158,9 @@ struct cifs_readdata { | |||
1147 | struct cifs_readdata *rdata, | 1158 | struct cifs_readdata *rdata, |
1148 | struct iov_iter *iter); | 1159 | struct iov_iter *iter); |
1149 | struct kvec iov[2]; | 1160 | struct kvec iov[2]; |
1161 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
1162 | struct smbd_mr *mr; | ||
1163 | #endif | ||
1150 | unsigned int pagesz; | 1164 | unsigned int pagesz; |
1151 | unsigned int tailsz; | 1165 | unsigned int tailsz; |
1152 | unsigned int credits; | 1166 | unsigned int credits; |
@@ -1169,6 +1183,9 @@ struct cifs_writedata { | |||
1169 | pid_t pid; | 1183 | pid_t pid; |
1170 | unsigned int bytes; | 1184 | unsigned int bytes; |
1171 | int result; | 1185 | int result; |
1186 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
1187 | struct smbd_mr *mr; | ||
1188 | #endif | ||
1172 | unsigned int pagesz; | 1189 | unsigned int pagesz; |
1173 | unsigned int tailsz; | 1190 | unsigned int tailsz; |
1174 | unsigned int credits; | 1191 | unsigned int credits; |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 4143c9dec463..93d565186698 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -106,6 +106,10 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, | |||
106 | struct kvec *, int /* nvec to send */, | 106 | struct kvec *, int /* nvec to send */, |
107 | int * /* type of buf returned */, const int flags, | 107 | int * /* type of buf returned */, const int flags, |
108 | struct kvec * /* resp vec */); | 108 | struct kvec * /* resp vec */); |
109 | extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses, | ||
110 | struct kvec *pkvec, int nvec_to_send, | ||
111 | int *pbuftype, const int flags, | ||
112 | struct kvec *presp); | ||
109 | extern int SendReceiveBlockingLock(const unsigned int xid, | 113 | extern int SendReceiveBlockingLock(const unsigned int xid, |
110 | struct cifs_tcon *ptcon, | 114 | struct cifs_tcon *ptcon, |
111 | struct smb_hdr *in_buf , | 115 | struct smb_hdr *in_buf , |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 35dc5bf01ee2..4e0922d24eb2 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include "cifs_unicode.h" | 43 | #include "cifs_unicode.h" |
44 | #include "cifs_debug.h" | 44 | #include "cifs_debug.h" |
45 | #include "fscache.h" | 45 | #include "fscache.h" |
46 | #include "smbdirect.h" | ||
46 | 47 | ||
47 | #ifdef CONFIG_CIFS_POSIX | 48 | #ifdef CONFIG_CIFS_POSIX |
48 | static struct { | 49 | static struct { |
@@ -1454,6 +1455,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
1454 | struct cifs_readdata *rdata = mid->callback_data; | 1455 | struct cifs_readdata *rdata = mid->callback_data; |
1455 | char *buf = server->smallbuf; | 1456 | char *buf = server->smallbuf; |
1456 | unsigned int buflen = get_rfc1002_length(buf) + 4; | 1457 | unsigned int buflen = get_rfc1002_length(buf) + 4; |
1458 | bool use_rdma_mr = false; | ||
1457 | 1459 | ||
1458 | cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", | 1460 | cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", |
1459 | __func__, mid->mid, rdata->offset, rdata->bytes); | 1461 | __func__, mid->mid, rdata->offset, rdata->bytes); |
@@ -1542,8 +1544,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | |||
1542 | rdata->iov[0].iov_base, server->total_read); | 1544 | rdata->iov[0].iov_base, server->total_read); |
1543 | 1545 | ||
1544 | /* how much data is in the response? */ | 1546 | /* how much data is in the response? */ |
1545 | data_len = server->ops->read_data_length(buf); | 1547 | #ifdef CONFIG_CIFS_SMB_DIRECT |
1546 | if (data_offset + data_len > buflen) { | 1548 | use_rdma_mr = rdata->mr; |
1549 | #endif | ||
1550 | data_len = server->ops->read_data_length(buf, use_rdma_mr); | ||
1551 | if (!use_rdma_mr && (data_offset + data_len > buflen)) { | ||
1547 | /* data_len is corrupt -- discard frame */ | 1552 | /* data_len is corrupt -- discard frame */ |
1548 | rdata->result = -EIO; | 1553 | rdata->result = -EIO; |
1549 | return cifs_readv_discard(server, mid); | 1554 | return cifs_readv_discard(server, mid); |
@@ -1923,6 +1928,12 @@ cifs_writedata_release(struct kref *refcount) | |||
1923 | { | 1928 | { |
1924 | struct cifs_writedata *wdata = container_of(refcount, | 1929 | struct cifs_writedata *wdata = container_of(refcount, |
1925 | struct cifs_writedata, refcount); | 1930 | struct cifs_writedata, refcount); |
1931 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
1932 | if (wdata->mr) { | ||
1933 | smbd_deregister_mr(wdata->mr); | ||
1934 | wdata->mr = NULL; | ||
1935 | } | ||
1936 | #endif | ||
1926 | 1937 | ||
1927 | if (wdata->cfile) | 1938 | if (wdata->cfile) |
1928 | cifsFileInfo_put(wdata->cfile); | 1939 | cifsFileInfo_put(wdata->cfile); |
@@ -4822,10 +4833,11 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, | |||
4822 | *target_nodes = NULL; | 4833 | *target_nodes = NULL; |
4823 | 4834 | ||
4824 | cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name); | 4835 | cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name); |
4825 | if (ses == NULL) | 4836 | if (ses == NULL || ses->tcon_ipc == NULL) |
4826 | return -ENODEV; | 4837 | return -ENODEV; |
4838 | |||
4827 | getDFSRetry: | 4839 | getDFSRetry: |
4828 | rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB, | 4840 | rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB, |
4829 | (void **) &pSMBr); | 4841 | (void **) &pSMBr); |
4830 | if (rc) | 4842 | if (rc) |
4831 | return rc; | 4843 | return rc; |
@@ -4833,7 +4845,7 @@ getDFSRetry: | |||
4833 | /* server pointer checked in called function, | 4845 | /* server pointer checked in called function, |
4834 | but should never be null here anyway */ | 4846 | but should never be null here anyway */ |
4835 | pSMB->hdr.Mid = get_next_mid(ses->server); | 4847 | pSMB->hdr.Mid = get_next_mid(ses->server); |
4836 | pSMB->hdr.Tid = ses->ipc_tid; | 4848 | pSMB->hdr.Tid = ses->tcon_ipc->tid; |
4837 | pSMB->hdr.Uid = ses->Suid; | 4849 | pSMB->hdr.Uid = ses->Suid; |
4838 | if (ses->capabilities & CAP_STATUS32) | 4850 | if (ses->capabilities & CAP_STATUS32) |
4839 | pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS; | 4851 | pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS; |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0bfc2280436d..a726f524fb84 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <net/ipv6.h> | 44 | #include <net/ipv6.h> |
45 | #include <linux/parser.h> | 45 | #include <linux/parser.h> |
46 | #include <linux/bvec.h> | 46 | #include <linux/bvec.h> |
47 | |||
48 | #include "cifspdu.h" | 47 | #include "cifspdu.h" |
49 | #include "cifsglob.h" | 48 | #include "cifsglob.h" |
50 | #include "cifsproto.h" | 49 | #include "cifsproto.h" |
@@ -56,6 +55,7 @@ | |||
56 | #include "rfc1002pdu.h" | 55 | #include "rfc1002pdu.h" |
57 | #include "fscache.h" | 56 | #include "fscache.h" |
58 | #include "smb2proto.h" | 57 | #include "smb2proto.h" |
58 | #include "smbdirect.h" | ||
59 | 59 | ||
60 | #define CIFS_PORT 445 | 60 | #define CIFS_PORT 445 |
61 | #define RFC1001_PORT 139 | 61 | #define RFC1001_PORT 139 |
@@ -92,7 +92,7 @@ enum { | |||
92 | Opt_multiuser, Opt_sloppy, Opt_nosharesock, | 92 | Opt_multiuser, Opt_sloppy, Opt_nosharesock, |
93 | Opt_persistent, Opt_nopersistent, | 93 | Opt_persistent, Opt_nopersistent, |
94 | Opt_resilient, Opt_noresilient, | 94 | Opt_resilient, Opt_noresilient, |
95 | Opt_domainauto, | 95 | Opt_domainauto, Opt_rdma, |
96 | 96 | ||
97 | /* Mount options which take numeric value */ | 97 | /* Mount options which take numeric value */ |
98 | Opt_backupuid, Opt_backupgid, Opt_uid, | 98 | Opt_backupuid, Opt_backupgid, Opt_uid, |
@@ -183,6 +183,7 @@ static const match_table_t cifs_mount_option_tokens = { | |||
183 | { Opt_resilient, "resilienthandles"}, | 183 | { Opt_resilient, "resilienthandles"}, |
184 | { Opt_noresilient, "noresilienthandles"}, | 184 | { Opt_noresilient, "noresilienthandles"}, |
185 | { Opt_domainauto, "domainauto"}, | 185 | { Opt_domainauto, "domainauto"}, |
186 | { Opt_rdma, "rdma"}, | ||
186 | 187 | ||
187 | { Opt_backupuid, "backupuid=%s" }, | 188 | { Opt_backupuid, "backupuid=%s" }, |
188 | { Opt_backupgid, "backupgid=%s" }, | 189 | { Opt_backupgid, "backupgid=%s" }, |
@@ -353,11 +354,12 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
353 | list_for_each(tmp, &server->smb_ses_list) { | 354 | list_for_each(tmp, &server->smb_ses_list) { |
354 | ses = list_entry(tmp, struct cifs_ses, smb_ses_list); | 355 | ses = list_entry(tmp, struct cifs_ses, smb_ses_list); |
355 | ses->need_reconnect = true; | 356 | ses->need_reconnect = true; |
356 | ses->ipc_tid = 0; | ||
357 | list_for_each(tmp2, &ses->tcon_list) { | 357 | list_for_each(tmp2, &ses->tcon_list) { |
358 | tcon = list_entry(tmp2, struct cifs_tcon, tcon_list); | 358 | tcon = list_entry(tmp2, struct cifs_tcon, tcon_list); |
359 | tcon->need_reconnect = true; | 359 | tcon->need_reconnect = true; |
360 | } | 360 | } |
361 | if (ses->tcon_ipc) | ||
362 | ses->tcon_ipc->need_reconnect = true; | ||
361 | } | 363 | } |
362 | spin_unlock(&cifs_tcp_ses_lock); | 364 | spin_unlock(&cifs_tcp_ses_lock); |
363 | 365 | ||
@@ -405,7 +407,10 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
405 | 407 | ||
406 | /* we should try only the port we connected to before */ | 408 | /* we should try only the port we connected to before */ |
407 | mutex_lock(&server->srv_mutex); | 409 | mutex_lock(&server->srv_mutex); |
408 | rc = generic_ip_connect(server); | 410 | if (cifs_rdma_enabled(server)) |
411 | rc = smbd_reconnect(server); | ||
412 | else | ||
413 | rc = generic_ip_connect(server); | ||
409 | if (rc) { | 414 | if (rc) { |
410 | cifs_dbg(FYI, "reconnect error %d\n", rc); | 415 | cifs_dbg(FYI, "reconnect error %d\n", rc); |
411 | mutex_unlock(&server->srv_mutex); | 416 | mutex_unlock(&server->srv_mutex); |
@@ -538,8 +543,10 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) | |||
538 | 543 | ||
539 | if (server_unresponsive(server)) | 544 | if (server_unresponsive(server)) |
540 | return -ECONNABORTED; | 545 | return -ECONNABORTED; |
541 | 546 | if (cifs_rdma_enabled(server) && server->smbd_conn) | |
542 | length = sock_recvmsg(server->ssocket, smb_msg, 0); | 547 | length = smbd_recv(server->smbd_conn, smb_msg); |
548 | else | ||
549 | length = sock_recvmsg(server->ssocket, smb_msg, 0); | ||
543 | 550 | ||
544 | if (server->tcpStatus == CifsExiting) | 551 | if (server->tcpStatus == CifsExiting) |
545 | return -ESHUTDOWN; | 552 | return -ESHUTDOWN; |
@@ -700,7 +707,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) | |||
700 | wake_up_all(&server->request_q); | 707 | wake_up_all(&server->request_q); |
701 | /* give those requests time to exit */ | 708 | /* give those requests time to exit */ |
702 | msleep(125); | 709 | msleep(125); |
703 | 710 | if (cifs_rdma_enabled(server) && server->smbd_conn) { | |
711 | smbd_destroy(server->smbd_conn); | ||
712 | server->smbd_conn = NULL; | ||
713 | } | ||
704 | if (server->ssocket) { | 714 | if (server->ssocket) { |
705 | sock_release(server->ssocket); | 715 | sock_release(server->ssocket); |
706 | server->ssocket = NULL; | 716 | server->ssocket = NULL; |
@@ -1550,6 +1560,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1550 | case Opt_domainauto: | 1560 | case Opt_domainauto: |
1551 | vol->domainauto = true; | 1561 | vol->domainauto = true; |
1552 | break; | 1562 | break; |
1563 | case Opt_rdma: | ||
1564 | vol->rdma = true; | ||
1565 | break; | ||
1553 | 1566 | ||
1554 | /* Numeric Values */ | 1567 | /* Numeric Values */ |
1555 | case Opt_backupuid: | 1568 | case Opt_backupuid: |
@@ -1707,7 +1720,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1707 | tmp_end++; | 1720 | tmp_end++; |
1708 | if (!(tmp_end < end && tmp_end[1] == delim)) { | 1721 | if (!(tmp_end < end && tmp_end[1] == delim)) { |
1709 | /* No it is not. Set the password to NULL */ | 1722 | /* No it is not. Set the password to NULL */ |
1710 | kfree(vol->password); | 1723 | kzfree(vol->password); |
1711 | vol->password = NULL; | 1724 | vol->password = NULL; |
1712 | break; | 1725 | break; |
1713 | } | 1726 | } |
@@ -1745,7 +1758,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1745 | options = end; | 1758 | options = end; |
1746 | } | 1759 | } |
1747 | 1760 | ||
1748 | kfree(vol->password); | 1761 | kzfree(vol->password); |
1749 | /* Now build new password string */ | 1762 | /* Now build new password string */ |
1750 | temp_len = strlen(value); | 1763 | temp_len = strlen(value); |
1751 | vol->password = kzalloc(temp_len+1, GFP_KERNEL); | 1764 | vol->password = kzalloc(temp_len+1, GFP_KERNEL); |
@@ -1951,6 +1964,19 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1951 | goto cifs_parse_mount_err; | 1964 | goto cifs_parse_mount_err; |
1952 | } | 1965 | } |
1953 | 1966 | ||
1967 | if (vol->rdma && vol->vals->protocol_id < SMB30_PROT_ID) { | ||
1968 | cifs_dbg(VFS, "SMB Direct requires Version >=3.0\n"); | ||
1969 | goto cifs_parse_mount_err; | ||
1970 | } | ||
1971 | |||
1972 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
1973 | if (vol->rdma && vol->sign) { | ||
1974 | cifs_dbg(VFS, "Currently SMB direct doesn't support signing." | ||
1975 | " This is being fixed\n"); | ||
1976 | goto cifs_parse_mount_err; | ||
1977 | } | ||
1978 | #endif | ||
1979 | |||
1954 | #ifndef CONFIG_KEYS | 1980 | #ifndef CONFIG_KEYS |
1955 | /* Muliuser mounts require CONFIG_KEYS support */ | 1981 | /* Muliuser mounts require CONFIG_KEYS support */ |
1956 | if (vol->multiuser) { | 1982 | if (vol->multiuser) { |
@@ -2162,6 +2188,9 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol) | |||
2162 | if (server->echo_interval != vol->echo_interval * HZ) | 2188 | if (server->echo_interval != vol->echo_interval * HZ) |
2163 | return 0; | 2189 | return 0; |
2164 | 2190 | ||
2191 | if (server->rdma != vol->rdma) | ||
2192 | return 0; | ||
2193 | |||
2165 | return 1; | 2194 | return 1; |
2166 | } | 2195 | } |
2167 | 2196 | ||
@@ -2260,6 +2289,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
2260 | tcp_ses->noblocksnd = volume_info->noblocksnd; | 2289 | tcp_ses->noblocksnd = volume_info->noblocksnd; |
2261 | tcp_ses->noautotune = volume_info->noautotune; | 2290 | tcp_ses->noautotune = volume_info->noautotune; |
2262 | tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; | 2291 | tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; |
2292 | tcp_ses->rdma = volume_info->rdma; | ||
2263 | tcp_ses->in_flight = 0; | 2293 | tcp_ses->in_flight = 0; |
2264 | tcp_ses->credits = 1; | 2294 | tcp_ses->credits = 1; |
2265 | init_waitqueue_head(&tcp_ses->response_q); | 2295 | init_waitqueue_head(&tcp_ses->response_q); |
@@ -2297,13 +2327,29 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
2297 | tcp_ses->echo_interval = volume_info->echo_interval * HZ; | 2327 | tcp_ses->echo_interval = volume_info->echo_interval * HZ; |
2298 | else | 2328 | else |
2299 | tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ; | 2329 | tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ; |
2300 | 2330 | if (tcp_ses->rdma) { | |
2331 | #ifndef CONFIG_CIFS_SMB_DIRECT | ||
2332 | cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n"); | ||
2333 | rc = -ENOENT; | ||
2334 | goto out_err_crypto_release; | ||
2335 | #endif | ||
2336 | tcp_ses->smbd_conn = smbd_get_connection( | ||
2337 | tcp_ses, (struct sockaddr *)&volume_info->dstaddr); | ||
2338 | if (tcp_ses->smbd_conn) { | ||
2339 | cifs_dbg(VFS, "RDMA transport established\n"); | ||
2340 | rc = 0; | ||
2341 | goto smbd_connected; | ||
2342 | } else { | ||
2343 | rc = -ENOENT; | ||
2344 | goto out_err_crypto_release; | ||
2345 | } | ||
2346 | } | ||
2301 | rc = ip_connect(tcp_ses); | 2347 | rc = ip_connect(tcp_ses); |
2302 | if (rc < 0) { | 2348 | if (rc < 0) { |
2303 | cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n"); | 2349 | cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n"); |
2304 | goto out_err_crypto_release; | 2350 | goto out_err_crypto_release; |
2305 | } | 2351 | } |
2306 | 2352 | smbd_connected: | |
2307 | /* | 2353 | /* |
2308 | * since we're in a cifs function already, we know that | 2354 | * since we're in a cifs function already, we know that |
2309 | * this will succeed. No need for try_module_get(). | 2355 | * this will succeed. No need for try_module_get(). |
@@ -2381,6 +2427,93 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol) | |||
2381 | return 1; | 2427 | return 1; |
2382 | } | 2428 | } |
2383 | 2429 | ||
2430 | /** | ||
2431 | * cifs_setup_ipc - helper to setup the IPC tcon for the session | ||
2432 | * | ||
2433 | * A new IPC connection is made and stored in the session | ||
2434 | * tcon_ipc. The IPC tcon has the same lifetime as the session. | ||
2435 | */ | ||
2436 | static int | ||
2437 | cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info) | ||
2438 | { | ||
2439 | int rc = 0, xid; | ||
2440 | struct cifs_tcon *tcon; | ||
2441 | struct nls_table *nls_codepage; | ||
2442 | char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0}; | ||
2443 | bool seal = false; | ||
2444 | |||
2445 | /* | ||
2446 | * If the mount request that resulted in the creation of the | ||
2447 | * session requires encryption, force IPC to be encrypted too. | ||
2448 | */ | ||
2449 | if (volume_info->seal) { | ||
2450 | if (ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) | ||
2451 | seal = true; | ||
2452 | else { | ||
2453 | cifs_dbg(VFS, | ||
2454 | "IPC: server doesn't support encryption\n"); | ||
2455 | return -EOPNOTSUPP; | ||
2456 | } | ||
2457 | } | ||
2458 | |||
2459 | tcon = tconInfoAlloc(); | ||
2460 | if (tcon == NULL) | ||
2461 | return -ENOMEM; | ||
2462 | |||
2463 | snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName); | ||
2464 | |||
2465 | /* cannot fail */ | ||
2466 | nls_codepage = load_nls_default(); | ||
2467 | |||
2468 | xid = get_xid(); | ||
2469 | tcon->ses = ses; | ||
2470 | tcon->ipc = true; | ||
2471 | tcon->seal = seal; | ||
2472 | rc = ses->server->ops->tree_connect(xid, ses, unc, tcon, nls_codepage); | ||
2473 | free_xid(xid); | ||
2474 | |||
2475 | if (rc) { | ||
2476 | cifs_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); | ||
2477 | tconInfoFree(tcon); | ||
2478 | goto out; | ||
2479 | } | ||
2480 | |||
2481 | cifs_dbg(FYI, "IPC tcon rc = %d ipc tid = %d\n", rc, tcon->tid); | ||
2482 | |||
2483 | ses->tcon_ipc = tcon; | ||
2484 | out: | ||
2485 | unload_nls(nls_codepage); | ||
2486 | return rc; | ||
2487 | } | ||
2488 | |||
2489 | /** | ||
2490 | * cifs_free_ipc - helper to release the session IPC tcon | ||
2491 | * | ||
2492 | * Needs to be called everytime a session is destroyed | ||
2493 | */ | ||
2494 | static int | ||
2495 | cifs_free_ipc(struct cifs_ses *ses) | ||
2496 | { | ||
2497 | int rc = 0, xid; | ||
2498 | struct cifs_tcon *tcon = ses->tcon_ipc; | ||
2499 | |||
2500 | if (tcon == NULL) | ||
2501 | return 0; | ||
2502 | |||
2503 | if (ses->server->ops->tree_disconnect) { | ||
2504 | xid = get_xid(); | ||
2505 | rc = ses->server->ops->tree_disconnect(xid, tcon); | ||
2506 | free_xid(xid); | ||
2507 | } | ||
2508 | |||
2509 | if (rc) | ||
2510 | cifs_dbg(FYI, "failed to disconnect IPC tcon (rc=%d)\n", rc); | ||
2511 | |||
2512 | tconInfoFree(tcon); | ||
2513 | ses->tcon_ipc = NULL; | ||
2514 | return rc; | ||
2515 | } | ||
2516 | |||
2384 | static struct cifs_ses * | 2517 | static struct cifs_ses * |
2385 | cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) | 2518 | cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) |
2386 | { | 2519 | { |
@@ -2421,6 +2554,8 @@ cifs_put_smb_ses(struct cifs_ses *ses) | |||
2421 | ses->status = CifsExiting; | 2554 | ses->status = CifsExiting; |
2422 | spin_unlock(&cifs_tcp_ses_lock); | 2555 | spin_unlock(&cifs_tcp_ses_lock); |
2423 | 2556 | ||
2557 | cifs_free_ipc(ses); | ||
2558 | |||
2424 | if (ses->status == CifsExiting && server->ops->logoff) { | 2559 | if (ses->status == CifsExiting && server->ops->logoff) { |
2425 | xid = get_xid(); | 2560 | xid = get_xid(); |
2426 | rc = server->ops->logoff(xid, ses); | 2561 | rc = server->ops->logoff(xid, ses); |
@@ -2569,6 +2704,13 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)), | |||
2569 | } | 2704 | } |
2570 | #endif /* CONFIG_KEYS */ | 2705 | #endif /* CONFIG_KEYS */ |
2571 | 2706 | ||
2707 | /** | ||
2708 | * cifs_get_smb_ses - get a session matching @volume_info data from @server | ||
2709 | * | ||
2710 | * This function assumes it is being called from cifs_mount() where we | ||
2711 | * already got a server reference (server refcount +1). See | ||
2712 | * cifs_get_tcon() for refcount explanations. | ||
2713 | */ | ||
2572 | static struct cifs_ses * | 2714 | static struct cifs_ses * |
2573 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | 2715 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) |
2574 | { | 2716 | { |
@@ -2665,6 +2807,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
2665 | spin_unlock(&cifs_tcp_ses_lock); | 2807 | spin_unlock(&cifs_tcp_ses_lock); |
2666 | 2808 | ||
2667 | free_xid(xid); | 2809 | free_xid(xid); |
2810 | |||
2811 | cifs_setup_ipc(ses, volume_info); | ||
2812 | |||
2668 | return ses; | 2813 | return ses; |
2669 | 2814 | ||
2670 | get_ses_fail: | 2815 | get_ses_fail: |
@@ -2709,8 +2854,16 @@ void | |||
2709 | cifs_put_tcon(struct cifs_tcon *tcon) | 2854 | cifs_put_tcon(struct cifs_tcon *tcon) |
2710 | { | 2855 | { |
2711 | unsigned int xid; | 2856 | unsigned int xid; |
2712 | struct cifs_ses *ses = tcon->ses; | 2857 | struct cifs_ses *ses; |
2858 | |||
2859 | /* | ||
2860 | * IPC tcon share the lifetime of their session and are | ||
2861 | * destroyed in the session put function | ||
2862 | */ | ||
2863 | if (tcon == NULL || tcon->ipc) | ||
2864 | return; | ||
2713 | 2865 | ||
2866 | ses = tcon->ses; | ||
2714 | cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); | 2867 | cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count); |
2715 | spin_lock(&cifs_tcp_ses_lock); | 2868 | spin_lock(&cifs_tcp_ses_lock); |
2716 | if (--tcon->tc_count > 0) { | 2869 | if (--tcon->tc_count > 0) { |
@@ -2731,6 +2884,26 @@ cifs_put_tcon(struct cifs_tcon *tcon) | |||
2731 | cifs_put_smb_ses(ses); | 2884 | cifs_put_smb_ses(ses); |
2732 | } | 2885 | } |
2733 | 2886 | ||
2887 | /** | ||
2888 | * cifs_get_tcon - get a tcon matching @volume_info data from @ses | ||
2889 | * | ||
2890 | * - tcon refcount is the number of mount points using the tcon. | ||
2891 | * - ses refcount is the number of tcon using the session. | ||
2892 | * | ||
2893 | * 1. This function assumes it is being called from cifs_mount() where | ||
2894 | * we already got a session reference (ses refcount +1). | ||
2895 | * | ||
2896 | * 2. Since we're in the context of adding a mount point, the end | ||
2897 | * result should be either: | ||
2898 | * | ||
2899 | * a) a new tcon already allocated with refcount=1 (1 mount point) and | ||
2900 | * its session refcount incremented (1 new tcon). This +1 was | ||
2901 | * already done in (1). | ||
2902 | * | ||
2903 | * b) an existing tcon with refcount+1 (add a mount point to it) and | ||
2904 | * identical ses refcount (no new tcon). Because of (1) we need to | ||
2905 | * decrement the ses refcount. | ||
2906 | */ | ||
2734 | static struct cifs_tcon * | 2907 | static struct cifs_tcon * |
2735 | cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | 2908 | cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) |
2736 | { | 2909 | { |
@@ -2739,8 +2912,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) | |||
2739 | 2912 | ||
2740 | tcon = cifs_find_tcon(ses, volume_info); | 2913 | tcon = cifs_find_tcon(ses, volume_info); |
2741 | if (tcon) { | 2914 | if (tcon) { |
2915 | /* | ||
2916 | * tcon has refcount already incremented but we need to | ||
2917 | * decrement extra ses reference gotten by caller (case b) | ||
2918 | */ | ||
2742 | cifs_dbg(FYI, "Found match on UNC path\n"); | 2919 | cifs_dbg(FYI, "Found match on UNC path\n"); |
2743 | /* existing tcon already has a reference */ | ||
2744 | cifs_put_smb_ses(ses); | 2920 | cifs_put_smb_ses(ses); |
2745 | return tcon; | 2921 | return tcon; |
2746 | } | 2922 | } |
@@ -2986,39 +3162,17 @@ get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path, | |||
2986 | const struct nls_table *nls_codepage, unsigned int *num_referrals, | 3162 | const struct nls_table *nls_codepage, unsigned int *num_referrals, |
2987 | struct dfs_info3_param **referrals, int remap) | 3163 | struct dfs_info3_param **referrals, int remap) |
2988 | { | 3164 | { |
2989 | char *temp_unc; | ||
2990 | int rc = 0; | 3165 | int rc = 0; |
2991 | 3166 | ||
2992 | if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer) | 3167 | if (!ses->server->ops->get_dfs_refer) |
2993 | return -ENOSYS; | 3168 | return -ENOSYS; |
2994 | 3169 | ||
2995 | *num_referrals = 0; | 3170 | *num_referrals = 0; |
2996 | *referrals = NULL; | 3171 | *referrals = NULL; |
2997 | 3172 | ||
2998 | if (ses->ipc_tid == 0) { | 3173 | rc = ses->server->ops->get_dfs_refer(xid, ses, old_path, |
2999 | temp_unc = kmalloc(2 /* for slashes */ + | 3174 | referrals, num_referrals, |
3000 | strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2) | 3175 | nls_codepage, remap); |
3001 | + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL); | ||
3002 | if (temp_unc == NULL) | ||
3003 | return -ENOMEM; | ||
3004 | temp_unc[0] = '\\'; | ||
3005 | temp_unc[1] = '\\'; | ||
3006 | strcpy(temp_unc + 2, ses->serverName); | ||
3007 | strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$"); | ||
3008 | rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL, | ||
3009 | nls_codepage); | ||
3010 | cifs_dbg(FYI, "Tcon rc = %d ipc_tid = %d\n", rc, ses->ipc_tid); | ||
3011 | kfree(temp_unc); | ||
3012 | } | ||
3013 | if (rc == 0) | ||
3014 | rc = ses->server->ops->get_dfs_refer(xid, ses, old_path, | ||
3015 | referrals, num_referrals, | ||
3016 | nls_codepage, remap); | ||
3017 | /* | ||
3018 | * BB - map targetUNCs to dfs_info3 structures, here or in | ||
3019 | * ses->server->ops->get_dfs_refer. | ||
3020 | */ | ||
3021 | |||
3022 | return rc; | 3176 | return rc; |
3023 | } | 3177 | } |
3024 | 3178 | ||
@@ -3783,7 +3937,7 @@ try_mount_again: | |||
3783 | tcon->unix_ext = 0; /* server does not support them */ | 3937 | tcon->unix_ext = 0; /* server does not support them */ |
3784 | 3938 | ||
3785 | /* do not care if a following call succeed - informational */ | 3939 | /* do not care if a following call succeed - informational */ |
3786 | if (!tcon->ipc && server->ops->qfs_tcon) | 3940 | if (!tcon->pipe && server->ops->qfs_tcon) |
3787 | server->ops->qfs_tcon(xid, tcon); | 3941 | server->ops->qfs_tcon(xid, tcon); |
3788 | 3942 | ||
3789 | cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info); | 3943 | cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info); |
@@ -3913,8 +4067,7 @@ out: | |||
3913 | } | 4067 | } |
3914 | 4068 | ||
3915 | /* | 4069 | /* |
3916 | * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon | 4070 | * Issue a TREE_CONNECT request. |
3917 | * pointer may be NULL. | ||
3918 | */ | 4071 | */ |
3919 | int | 4072 | int |
3920 | CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | 4073 | CIFSTCon(const unsigned int xid, struct cifs_ses *ses, |
@@ -3950,7 +4103,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | |||
3950 | pSMB->AndXCommand = 0xFF; | 4103 | pSMB->AndXCommand = 0xFF; |
3951 | pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); | 4104 | pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); |
3952 | bcc_ptr = &pSMB->Password[0]; | 4105 | bcc_ptr = &pSMB->Password[0]; |
3953 | if (!tcon || (ses->server->sec_mode & SECMODE_USER)) { | 4106 | if (tcon->pipe || (ses->server->sec_mode & SECMODE_USER)) { |
3954 | pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ | 4107 | pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ |
3955 | *bcc_ptr = 0; /* password is null byte */ | 4108 | *bcc_ptr = 0; /* password is null byte */ |
3956 | bcc_ptr++; /* skip password */ | 4109 | bcc_ptr++; /* skip password */ |
@@ -4022,7 +4175,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | |||
4022 | 0); | 4175 | 0); |
4023 | 4176 | ||
4024 | /* above now done in SendReceive */ | 4177 | /* above now done in SendReceive */ |
4025 | if ((rc == 0) && (tcon != NULL)) { | 4178 | if (rc == 0) { |
4026 | bool is_unicode; | 4179 | bool is_unicode; |
4027 | 4180 | ||
4028 | tcon->tidStatus = CifsGood; | 4181 | tcon->tidStatus = CifsGood; |
@@ -4042,7 +4195,8 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | |||
4042 | if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && | 4195 | if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && |
4043 | (bcc_ptr[2] == 'C')) { | 4196 | (bcc_ptr[2] == 'C')) { |
4044 | cifs_dbg(FYI, "IPC connection\n"); | 4197 | cifs_dbg(FYI, "IPC connection\n"); |
4045 | tcon->ipc = 1; | 4198 | tcon->ipc = true; |
4199 | tcon->pipe = true; | ||
4046 | } | 4200 | } |
4047 | } else if (length == 2) { | 4201 | } else if (length == 2) { |
4048 | if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { | 4202 | if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { |
@@ -4069,9 +4223,6 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | |||
4069 | else | 4223 | else |
4070 | tcon->Flags = 0; | 4224 | tcon->Flags = 0; |
4071 | cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); | 4225 | cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); |
4072 | } else if ((rc == 0) && tcon == NULL) { | ||
4073 | /* all we need to save for IPC$ connection */ | ||
4074 | ses->ipc_tid = smb_buffer_response->Tid; | ||
4075 | } | 4226 | } |
4076 | 4227 | ||
4077 | cifs_buf_release(smb_buffer); | 4228 | cifs_buf_release(smb_buffer); |
@@ -4235,7 +4386,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) | |||
4235 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); | 4386 | reset_cifs_unix_caps(0, tcon, NULL, vol_info); |
4236 | out: | 4387 | out: |
4237 | kfree(vol_info->username); | 4388 | kfree(vol_info->username); |
4238 | kfree(vol_info->password); | 4389 | kzfree(vol_info->password); |
4239 | kfree(vol_info); | 4390 | kfree(vol_info); |
4240 | 4391 | ||
4241 | return tcon; | 4392 | return tcon; |
@@ -4387,7 +4538,7 @@ cifs_prune_tlinks(struct work_struct *work) | |||
4387 | struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, | 4538 | struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, |
4388 | prune_tlinks.work); | 4539 | prune_tlinks.work); |
4389 | struct rb_root *root = &cifs_sb->tlink_tree; | 4540 | struct rb_root *root = &cifs_sb->tlink_tree; |
4390 | struct rb_node *node = rb_first(root); | 4541 | struct rb_node *node; |
4391 | struct rb_node *tmp; | 4542 | struct rb_node *tmp; |
4392 | struct tcon_link *tlink; | 4543 | struct tcon_link *tlink; |
4393 | 4544 | ||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index df9f682708c6..7cee97b93a61 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include "cifs_debug.h" | 42 | #include "cifs_debug.h" |
43 | #include "cifs_fs_sb.h" | 43 | #include "cifs_fs_sb.h" |
44 | #include "fscache.h" | 44 | #include "fscache.h" |
45 | 45 | #include "smbdirect.h" | |
46 | 46 | ||
47 | static inline int cifs_convert_flags(unsigned int flags) | 47 | static inline int cifs_convert_flags(unsigned int flags) |
48 | { | 48 | { |
@@ -2902,7 +2902,12 @@ cifs_readdata_release(struct kref *refcount) | |||
2902 | { | 2902 | { |
2903 | struct cifs_readdata *rdata = container_of(refcount, | 2903 | struct cifs_readdata *rdata = container_of(refcount, |
2904 | struct cifs_readdata, refcount); | 2904 | struct cifs_readdata, refcount); |
2905 | 2905 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
2906 | if (rdata->mr) { | ||
2907 | smbd_deregister_mr(rdata->mr); | ||
2908 | rdata->mr = NULL; | ||
2909 | } | ||
2910 | #endif | ||
2906 | if (rdata->cfile) | 2911 | if (rdata->cfile) |
2907 | cifsFileInfo_put(rdata->cfile); | 2912 | cifsFileInfo_put(rdata->cfile); |
2908 | 2913 | ||
@@ -3031,6 +3036,10 @@ uncached_fill_pages(struct TCP_Server_Info *server, | |||
3031 | } | 3036 | } |
3032 | if (iter) | 3037 | if (iter) |
3033 | result = copy_page_from_iter(page, 0, n, iter); | 3038 | result = copy_page_from_iter(page, 0, n, iter); |
3039 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
3040 | else if (rdata->mr) | ||
3041 | result = n; | ||
3042 | #endif | ||
3034 | else | 3043 | else |
3035 | result = cifs_read_page_from_socket(server, page, n); | 3044 | result = cifs_read_page_from_socket(server, page, n); |
3036 | if (result < 0) | 3045 | if (result < 0) |
@@ -3471,20 +3480,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = { | |||
3471 | 3480 | ||
3472 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) | 3481 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) |
3473 | { | 3482 | { |
3474 | int rc, xid; | 3483 | int xid, rc = 0; |
3475 | struct inode *inode = file_inode(file); | 3484 | struct inode *inode = file_inode(file); |
3476 | 3485 | ||
3477 | xid = get_xid(); | 3486 | xid = get_xid(); |
3478 | 3487 | ||
3479 | if (!CIFS_CACHE_READ(CIFS_I(inode))) { | 3488 | if (!CIFS_CACHE_READ(CIFS_I(inode))) |
3480 | rc = cifs_zap_mapping(inode); | 3489 | rc = cifs_zap_mapping(inode); |
3481 | if (rc) | 3490 | if (!rc) |
3482 | return rc; | 3491 | rc = generic_file_mmap(file, vma); |
3483 | } | 3492 | if (!rc) |
3484 | |||
3485 | rc = generic_file_mmap(file, vma); | ||
3486 | if (rc == 0) | ||
3487 | vma->vm_ops = &cifs_file_vm_ops; | 3493 | vma->vm_ops = &cifs_file_vm_ops; |
3494 | |||
3488 | free_xid(xid); | 3495 | free_xid(xid); |
3489 | return rc; | 3496 | return rc; |
3490 | } | 3497 | } |
@@ -3494,16 +3501,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
3494 | int rc, xid; | 3501 | int rc, xid; |
3495 | 3502 | ||
3496 | xid = get_xid(); | 3503 | xid = get_xid(); |
3504 | |||
3497 | rc = cifs_revalidate_file(file); | 3505 | rc = cifs_revalidate_file(file); |
3498 | if (rc) { | 3506 | if (rc) |
3499 | cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", | 3507 | cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", |
3500 | rc); | 3508 | rc); |
3501 | free_xid(xid); | 3509 | if (!rc) |
3502 | return rc; | 3510 | rc = generic_file_mmap(file, vma); |
3503 | } | 3511 | if (!rc) |
3504 | rc = generic_file_mmap(file, vma); | ||
3505 | if (rc == 0) | ||
3506 | vma->vm_ops = &cifs_file_vm_ops; | 3512 | vma->vm_ops = &cifs_file_vm_ops; |
3513 | |||
3507 | free_xid(xid); | 3514 | free_xid(xid); |
3508 | return rc; | 3515 | return rc; |
3509 | } | 3516 | } |
@@ -3600,6 +3607,10 @@ readpages_fill_pages(struct TCP_Server_Info *server, | |||
3600 | 3607 | ||
3601 | if (iter) | 3608 | if (iter) |
3602 | result = copy_page_from_iter(page, 0, n, iter); | 3609 | result = copy_page_from_iter(page, 0, n, iter); |
3610 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
3611 | else if (rdata->mr) | ||
3612 | result = n; | ||
3613 | #endif | ||
3603 | else | 3614 | else |
3604 | result = cifs_read_page_from_socket(server, page, n); | 3615 | result = cifs_read_page_from_socket(server, page, n); |
3605 | if (result < 0) | 3616 | if (result < 0) |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ecb99079363a..8f9a8cc7cc62 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -1049,7 +1049,7 @@ iget_no_retry: | |||
1049 | tcon->resource_id = CIFS_I(inode)->uniqueid; | 1049 | tcon->resource_id = CIFS_I(inode)->uniqueid; |
1050 | #endif | 1050 | #endif |
1051 | 1051 | ||
1052 | if (rc && tcon->ipc) { | 1052 | if (rc && tcon->pipe) { |
1053 | cifs_dbg(FYI, "ipc connection - fake read inode\n"); | 1053 | cifs_dbg(FYI, "ipc connection - fake read inode\n"); |
1054 | spin_lock(&inode->i_lock); | 1054 | spin_lock(&inode->i_lock); |
1055 | inode->i_mode |= S_IFDIR; | 1055 | inode->i_mode |= S_IFDIR; |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index eea93ac15ef0..a0dbced4a45c 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
@@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free) | |||
98 | kfree(buf_to_free->serverOS); | 98 | kfree(buf_to_free->serverOS); |
99 | kfree(buf_to_free->serverDomain); | 99 | kfree(buf_to_free->serverDomain); |
100 | kfree(buf_to_free->serverNOS); | 100 | kfree(buf_to_free->serverNOS); |
101 | if (buf_to_free->password) { | 101 | kzfree(buf_to_free->password); |
102 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); | ||
103 | kfree(buf_to_free->password); | ||
104 | } | ||
105 | kfree(buf_to_free->user_name); | 102 | kfree(buf_to_free->user_name); |
106 | kfree(buf_to_free->domainName); | 103 | kfree(buf_to_free->domainName); |
107 | kfree(buf_to_free->auth_key.response); | 104 | kzfree(buf_to_free->auth_key.response); |
108 | kfree(buf_to_free); | 105 | kzfree(buf_to_free); |
109 | } | 106 | } |
110 | 107 | ||
111 | struct cifs_tcon * | 108 | struct cifs_tcon * |
@@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free) | |||
136 | } | 133 | } |
137 | atomic_dec(&tconInfoAllocCount); | 134 | atomic_dec(&tconInfoAllocCount); |
138 | kfree(buf_to_free->nativeFileSystem); | 135 | kfree(buf_to_free->nativeFileSystem); |
139 | if (buf_to_free->password) { | 136 | kzfree(buf_to_free->password); |
140 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); | ||
141 | kfree(buf_to_free->password); | ||
142 | } | ||
143 | kfree(buf_to_free); | 137 | kfree(buf_to_free); |
144 | } | 138 | } |
145 | 139 | ||
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index a723df3e0197..3d495e440c87 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -87,9 +87,11 @@ cifs_read_data_offset(char *buf) | |||
87 | } | 87 | } |
88 | 88 | ||
89 | static unsigned int | 89 | static unsigned int |
90 | cifs_read_data_length(char *buf) | 90 | cifs_read_data_length(char *buf, bool in_remaining) |
91 | { | 91 | { |
92 | READ_RSP *rsp = (READ_RSP *)buf; | 92 | READ_RSP *rsp = (READ_RSP *)buf; |
93 | /* It's a bug reading remaining data for SMB1 packets */ | ||
94 | WARN_ON(in_remaining); | ||
93 | return (le16_to_cpu(rsp->DataLengthHigh) << 16) + | 95 | return (le16_to_cpu(rsp->DataLengthHigh) << 16) + |
94 | le16_to_cpu(rsp->DataLength); | 96 | le16_to_cpu(rsp->DataLength); |
95 | } | 97 | } |
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index b4b1f0305f29..12af5dba742b 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c | |||
@@ -74,7 +74,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
74 | nr_ioctl_req.Reserved = 0; | 74 | nr_ioctl_req.Reserved = 0; |
75 | rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, | 75 | rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, |
76 | fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, | 76 | fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, |
77 | true /* is_fsctl */, false /* use_ipc */, | 77 | true /* is_fsctl */, |
78 | (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), | 78 | (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), |
79 | NULL, NULL /* no return info */); | 79 | NULL, NULL /* no return info */); |
80 | if (rc == -EOPNOTSUPP) { | 80 | if (rc == -EOPNOTSUPP) { |
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 7b08a1446a7f..76d03abaa38c 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
@@ -578,7 +578,7 @@ smb2_is_valid_lease_break(char *buffer) | |||
578 | bool | 578 | bool |
579 | smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) | 579 | smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) |
580 | { | 580 | { |
581 | struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer; | 581 | struct smb2_oplock_break_rsp *rsp = (struct smb2_oplock_break_rsp *)buffer; |
582 | struct list_head *tmp, *tmp1, *tmp2; | 582 | struct list_head *tmp, *tmp1, *tmp2; |
583 | struct cifs_ses *ses; | 583 | struct cifs_ses *ses; |
584 | struct cifs_tcon *tcon; | 584 | struct cifs_tcon *tcon; |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index ed88ab8a4774..eb68e2fcc500 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "smb2status.h" | 32 | #include "smb2status.h" |
33 | #include "smb2glob.h" | 33 | #include "smb2glob.h" |
34 | #include "cifs_ioctl.h" | 34 | #include "cifs_ioctl.h" |
35 | #include "smbdirect.h" | ||
35 | 36 | ||
36 | static int | 37 | static int |
37 | change_conf(struct TCP_Server_Info *server) | 38 | change_conf(struct TCP_Server_Info *server) |
@@ -250,7 +251,11 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
250 | /* start with specified wsize, or default */ | 251 | /* start with specified wsize, or default */ |
251 | wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; | 252 | wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; |
252 | wsize = min_t(unsigned int, wsize, server->max_write); | 253 | wsize = min_t(unsigned int, wsize, server->max_write); |
253 | 254 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
255 | if (server->rdma) | ||
256 | wsize = min_t(unsigned int, | ||
257 | wsize, server->smbd_conn->max_readwrite_size); | ||
258 | #endif | ||
254 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) | 259 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) |
255 | wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); | 260 | wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); |
256 | 261 | ||
@@ -266,6 +271,11 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
266 | /* start with specified rsize, or default */ | 271 | /* start with specified rsize, or default */ |
267 | rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; | 272 | rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; |
268 | rsize = min_t(unsigned int, rsize, server->max_read); | 273 | rsize = min_t(unsigned int, rsize, server->max_read); |
274 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
275 | if (server->rdma) | ||
276 | rsize = min_t(unsigned int, | ||
277 | rsize, server->smbd_conn->max_readwrite_size); | ||
278 | #endif | ||
269 | 279 | ||
270 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) | 280 | if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) |
271 | rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); | 281 | rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); |
@@ -283,7 +293,6 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) | |||
283 | 293 | ||
284 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 294 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
285 | FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, | 295 | FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, |
286 | false /* use_ipc */, | ||
287 | NULL /* no data input */, 0 /* no data input */, | 296 | NULL /* no data input */, 0 /* no data input */, |
288 | (char **)&out_buf, &ret_data_len); | 297 | (char **)&out_buf, &ret_data_len); |
289 | if (rc != 0) | 298 | if (rc != 0) |
@@ -782,7 +791,6 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, | |||
782 | 791 | ||
783 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, | 792 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, |
784 | FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, | 793 | FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, |
785 | false /* use_ipc */, | ||
786 | NULL, 0 /* no input */, | 794 | NULL, 0 /* no input */, |
787 | (char **)&res_key, &ret_data_len); | 795 | (char **)&res_key, &ret_data_len); |
788 | 796 | ||
@@ -848,8 +856,7 @@ smb2_copychunk_range(const unsigned int xid, | |||
848 | /* Request server copy to target from src identified by key */ | 856 | /* Request server copy to target from src identified by key */ |
849 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, | 857 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, |
850 | trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, | 858 | trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, |
851 | true /* is_fsctl */, false /* use_ipc */, | 859 | true /* is_fsctl */, (char *)pcchunk, |
852 | (char *)pcchunk, | ||
853 | sizeof(struct copychunk_ioctl), (char **)&retbuf, | 860 | sizeof(struct copychunk_ioctl), (char **)&retbuf, |
854 | &ret_data_len); | 861 | &ret_data_len); |
855 | if (rc == 0) { | 862 | if (rc == 0) { |
@@ -947,9 +954,13 @@ smb2_read_data_offset(char *buf) | |||
947 | } | 954 | } |
948 | 955 | ||
949 | static unsigned int | 956 | static unsigned int |
950 | smb2_read_data_length(char *buf) | 957 | smb2_read_data_length(char *buf, bool in_remaining) |
951 | { | 958 | { |
952 | struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf; | 959 | struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf; |
960 | |||
961 | if (in_remaining) | ||
962 | return le32_to_cpu(rsp->DataRemaining); | ||
963 | |||
953 | return le32_to_cpu(rsp->DataLength); | 964 | return le32_to_cpu(rsp->DataLength); |
954 | } | 965 | } |
955 | 966 | ||
@@ -1006,7 +1017,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, | |||
1006 | 1017 | ||
1007 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1018 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
1008 | cfile->fid.volatile_fid, FSCTL_SET_SPARSE, | 1019 | cfile->fid.volatile_fid, FSCTL_SET_SPARSE, |
1009 | true /* is_fctl */, false /* use_ipc */, | 1020 | true /* is_fctl */, |
1010 | &setsparse, 1, NULL, NULL); | 1021 | &setsparse, 1, NULL, NULL); |
1011 | if (rc) { | 1022 | if (rc) { |
1012 | tcon->broken_sparse_sup = true; | 1023 | tcon->broken_sparse_sup = true; |
@@ -1077,7 +1088,7 @@ smb2_duplicate_extents(const unsigned int xid, | |||
1077 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, | 1088 | rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, |
1078 | trgtfile->fid.volatile_fid, | 1089 | trgtfile->fid.volatile_fid, |
1079 | FSCTL_DUPLICATE_EXTENTS_TO_FILE, | 1090 | FSCTL_DUPLICATE_EXTENTS_TO_FILE, |
1080 | true /* is_fsctl */, false /* use_ipc */, | 1091 | true /* is_fsctl */, |
1081 | (char *)&dup_ext_buf, | 1092 | (char *)&dup_ext_buf, |
1082 | sizeof(struct duplicate_extents_to_file), | 1093 | sizeof(struct duplicate_extents_to_file), |
1083 | NULL, | 1094 | NULL, |
@@ -1112,7 +1123,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, | |||
1112 | return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1123 | return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
1113 | cfile->fid.volatile_fid, | 1124 | cfile->fid.volatile_fid, |
1114 | FSCTL_SET_INTEGRITY_INFORMATION, | 1125 | FSCTL_SET_INTEGRITY_INFORMATION, |
1115 | true /* is_fsctl */, false /* use_ipc */, | 1126 | true /* is_fsctl */, |
1116 | (char *)&integr_info, | 1127 | (char *)&integr_info, |
1117 | sizeof(struct fsctl_set_integrity_information_req), | 1128 | sizeof(struct fsctl_set_integrity_information_req), |
1118 | NULL, | 1129 | NULL, |
@@ -1132,7 +1143,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, | |||
1132 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1143 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
1133 | cfile->fid.volatile_fid, | 1144 | cfile->fid.volatile_fid, |
1134 | FSCTL_SRV_ENUMERATE_SNAPSHOTS, | 1145 | FSCTL_SRV_ENUMERATE_SNAPSHOTS, |
1135 | true /* is_fsctl */, false /* use_ipc */, | 1146 | true /* is_fsctl */, |
1136 | NULL, 0 /* no input data */, | 1147 | NULL, 0 /* no input data */, |
1137 | (char **)&retbuf, | 1148 | (char **)&retbuf, |
1138 | &ret_data_len); | 1149 | &ret_data_len); |
@@ -1351,16 +1362,20 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, | |||
1351 | cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name); | 1362 | cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name); |
1352 | 1363 | ||
1353 | /* | 1364 | /* |
1354 | * Use any tcon from the current session. Here, the first one. | 1365 | * Try to use the IPC tcon, otherwise just use any |
1355 | */ | 1366 | */ |
1356 | spin_lock(&cifs_tcp_ses_lock); | 1367 | tcon = ses->tcon_ipc; |
1357 | tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon, | 1368 | if (tcon == NULL) { |
1358 | tcon_list); | 1369 | spin_lock(&cifs_tcp_ses_lock); |
1359 | if (tcon) | 1370 | tcon = list_first_entry_or_null(&ses->tcon_list, |
1360 | tcon->tc_count++; | 1371 | struct cifs_tcon, |
1361 | spin_unlock(&cifs_tcp_ses_lock); | 1372 | tcon_list); |
1373 | if (tcon) | ||
1374 | tcon->tc_count++; | ||
1375 | spin_unlock(&cifs_tcp_ses_lock); | ||
1376 | } | ||
1362 | 1377 | ||
1363 | if (!tcon) { | 1378 | if (tcon == NULL) { |
1364 | cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n", | 1379 | cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n", |
1365 | ses); | 1380 | ses); |
1366 | rc = -ENOTCONN; | 1381 | rc = -ENOTCONN; |
@@ -1389,20 +1404,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, | |||
1389 | memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len); | 1404 | memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len); |
1390 | 1405 | ||
1391 | do { | 1406 | do { |
1392 | /* try first with IPC */ | ||
1393 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 1407 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
1394 | FSCTL_DFS_GET_REFERRALS, | 1408 | FSCTL_DFS_GET_REFERRALS, |
1395 | true /* is_fsctl */, true /* use_ipc */, | 1409 | true /* is_fsctl */, |
1396 | (char *)dfs_req, dfs_req_size, | 1410 | (char *)dfs_req, dfs_req_size, |
1397 | (char **)&dfs_rsp, &dfs_rsp_size); | 1411 | (char **)&dfs_rsp, &dfs_rsp_size); |
1398 | if (rc == -ENOTCONN) { | ||
1399 | /* try with normal tcon */ | ||
1400 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | ||
1401 | FSCTL_DFS_GET_REFERRALS, | ||
1402 | true /* is_fsctl */, false /*use_ipc*/, | ||
1403 | (char *)dfs_req, dfs_req_size, | ||
1404 | (char **)&dfs_rsp, &dfs_rsp_size); | ||
1405 | } | ||
1406 | } while (rc == -EAGAIN); | 1412 | } while (rc == -EAGAIN); |
1407 | 1413 | ||
1408 | if (rc) { | 1414 | if (rc) { |
@@ -1421,7 +1427,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, | |||
1421 | } | 1427 | } |
1422 | 1428 | ||
1423 | out: | 1429 | out: |
1424 | if (tcon) { | 1430 | if (tcon && !tcon->ipc) { |
1431 | /* ipc tcons are not refcounted */ | ||
1425 | spin_lock(&cifs_tcp_ses_lock); | 1432 | spin_lock(&cifs_tcp_ses_lock); |
1426 | tcon->tc_count--; | 1433 | tcon->tc_count--; |
1427 | spin_unlock(&cifs_tcp_ses_lock); | 1434 | spin_unlock(&cifs_tcp_ses_lock); |
@@ -1713,8 +1720,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, | |||
1713 | 1720 | ||
1714 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1721 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
1715 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, | 1722 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, |
1716 | true /* is_fctl */, false /* use_ipc */, | 1723 | true /* is_fctl */, (char *)&fsctl_buf, |
1717 | (char *)&fsctl_buf, | ||
1718 | sizeof(struct file_zero_data_information), NULL, NULL); | 1724 | sizeof(struct file_zero_data_information), NULL, NULL); |
1719 | free_xid(xid); | 1725 | free_xid(xid); |
1720 | return rc; | 1726 | return rc; |
@@ -1748,8 +1754,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, | |||
1748 | 1754 | ||
1749 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, | 1755 | rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, |
1750 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, | 1756 | cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, |
1751 | true /* is_fctl */, false /* use_ipc */, | 1757 | true /* is_fctl */, (char *)&fsctl_buf, |
1752 | (char *)&fsctl_buf, | ||
1753 | sizeof(struct file_zero_data_information), NULL, NULL); | 1758 | sizeof(struct file_zero_data_information), NULL, NULL); |
1754 | free_xid(xid); | 1759 | free_xid(xid); |
1755 | return rc; | 1760 | return rc; |
@@ -2411,6 +2416,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | |||
2411 | struct iov_iter iter; | 2416 | struct iov_iter iter; |
2412 | struct kvec iov; | 2417 | struct kvec iov; |
2413 | int length; | 2418 | int length; |
2419 | bool use_rdma_mr = false; | ||
2414 | 2420 | ||
2415 | if (shdr->Command != SMB2_READ) { | 2421 | if (shdr->Command != SMB2_READ) { |
2416 | cifs_dbg(VFS, "only big read responses are supported\n"); | 2422 | cifs_dbg(VFS, "only big read responses are supported\n"); |
@@ -2437,7 +2443,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | |||
2437 | } | 2443 | } |
2438 | 2444 | ||
2439 | data_offset = server->ops->read_data_offset(buf) + 4; | 2445 | data_offset = server->ops->read_data_offset(buf) + 4; |
2440 | data_len = server->ops->read_data_length(buf); | 2446 | #ifdef CONFIG_CIFS_SMB_DIRECT |
2447 | use_rdma_mr = rdata->mr; | ||
2448 | #endif | ||
2449 | data_len = server->ops->read_data_length(buf, use_rdma_mr); | ||
2441 | 2450 | ||
2442 | if (data_offset < server->vals->read_rsp_size) { | 2451 | if (data_offset < server->vals->read_rsp_size) { |
2443 | /* | 2452 | /* |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 01346b8b6edb..63778ac22fd9 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include "smb2glob.h" | 48 | #include "smb2glob.h" |
49 | #include "cifspdu.h" | 49 | #include "cifspdu.h" |
50 | #include "cifs_spnego.h" | 50 | #include "cifs_spnego.h" |
51 | #include "smbdirect.h" | ||
51 | 52 | ||
52 | /* | 53 | /* |
53 | * The following table defines the expected "StructureSize" of SMB2 requests | 54 | * The following table defines the expected "StructureSize" of SMB2 requests |
@@ -319,54 +320,16 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, | |||
319 | *total_len = parmsize + sizeof(struct smb2_sync_hdr); | 320 | *total_len = parmsize + sizeof(struct smb2_sync_hdr); |
320 | } | 321 | } |
321 | 322 | ||
322 | /* init request without RFC1001 length at the beginning */ | ||
323 | static int | ||
324 | smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, | ||
325 | void **request_buf, unsigned int *total_len) | ||
326 | { | ||
327 | int rc; | ||
328 | struct smb2_sync_hdr *shdr; | ||
329 | |||
330 | rc = smb2_reconnect(smb2_command, tcon); | ||
331 | if (rc) | ||
332 | return rc; | ||
333 | |||
334 | /* BB eventually switch this to SMB2 specific small buf size */ | ||
335 | *request_buf = cifs_small_buf_get(); | ||
336 | if (*request_buf == NULL) { | ||
337 | /* BB should we add a retry in here if not a writepage? */ | ||
338 | return -ENOMEM; | ||
339 | } | ||
340 | |||
341 | shdr = (struct smb2_sync_hdr *)(*request_buf); | ||
342 | |||
343 | fill_small_buf(smb2_command, tcon, shdr, total_len); | ||
344 | |||
345 | if (tcon != NULL) { | ||
346 | #ifdef CONFIG_CIFS_STATS2 | ||
347 | uint16_t com_code = le16_to_cpu(smb2_command); | ||
348 | |||
349 | cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); | ||
350 | #endif | ||
351 | cifs_stats_inc(&tcon->num_smbs_sent); | ||
352 | } | ||
353 | |||
354 | return rc; | ||
355 | } | ||
356 | |||
357 | /* | 323 | /* |
358 | * Allocate and return pointer to an SMB request hdr, and set basic | 324 | * Allocate and return pointer to an SMB request hdr, and set basic |
359 | * SMB information in the SMB header. If the return code is zero, this | 325 | * SMB information in the SMB header. If the return code is zero, this |
360 | * function must have filled in request_buf pointer. The returned buffer | 326 | * function must have filled in request_buf pointer. |
361 | * has RFC1001 length at the beginning. | ||
362 | */ | 327 | */ |
363 | static int | 328 | static int |
364 | small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, | 329 | smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, |
365 | void **request_buf) | 330 | void **request_buf, unsigned int *total_len) |
366 | { | 331 | { |
367 | int rc; | 332 | int rc; |
368 | unsigned int total_len; | ||
369 | struct smb2_pdu *pdu; | ||
370 | 333 | ||
371 | rc = smb2_reconnect(smb2_command, tcon); | 334 | rc = smb2_reconnect(smb2_command, tcon); |
372 | if (rc) | 335 | if (rc) |
@@ -379,12 +342,9 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, | |||
379 | return -ENOMEM; | 342 | return -ENOMEM; |
380 | } | 343 | } |
381 | 344 | ||
382 | pdu = (struct smb2_pdu *)(*request_buf); | 345 | fill_small_buf(smb2_command, tcon, |
383 | 346 | (struct smb2_sync_hdr *)(*request_buf), | |
384 | fill_small_buf(smb2_command, tcon, get_sync_hdr(pdu), &total_len); | 347 | total_len); |
385 | |||
386 | /* Note this is only network field converted to big endian */ | ||
387 | pdu->hdr.smb2_buf_length = cpu_to_be32(total_len); | ||
388 | 348 | ||
389 | if (tcon != NULL) { | 349 | if (tcon != NULL) { |
390 | #ifdef CONFIG_CIFS_STATS2 | 350 | #ifdef CONFIG_CIFS_STATS2 |
@@ -398,8 +358,8 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, | |||
398 | } | 358 | } |
399 | 359 | ||
400 | #ifdef CONFIG_CIFS_SMB311 | 360 | #ifdef CONFIG_CIFS_SMB311 |
401 | /* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */ | 361 | /* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */ |
402 | #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */ | 362 | #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */ |
403 | 363 | ||
404 | 364 | ||
405 | #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1) | 365 | #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1) |
@@ -427,23 +387,25 @@ build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) | |||
427 | } | 387 | } |
428 | 388 | ||
429 | static void | 389 | static void |
430 | assemble_neg_contexts(struct smb2_negotiate_req *req) | 390 | assemble_neg_contexts(struct smb2_negotiate_req *req, |
391 | unsigned int *total_len) | ||
431 | { | 392 | { |
432 | 393 | char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT; | |
433 | /* +4 is to account for the RFC1001 len field */ | ||
434 | char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4; | ||
435 | 394 | ||
436 | build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); | 395 | build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); |
437 | /* Add 2 to size to round to 8 byte boundary */ | 396 | /* Add 2 to size to round to 8 byte boundary */ |
397 | |||
438 | pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context); | 398 | pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context); |
439 | build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); | 399 | build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); |
440 | req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); | 400 | req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); |
441 | req->NegotiateContextCount = cpu_to_le16(2); | 401 | req->NegotiateContextCount = cpu_to_le16(2); |
442 | inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) | 402 | |
443 | + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */ | 403 | *total_len += 4 + sizeof(struct smb2_preauth_neg_context) |
404 | + sizeof(struct smb2_encryption_neg_context); | ||
444 | } | 405 | } |
445 | #else | 406 | #else |
446 | static void assemble_neg_contexts(struct smb2_negotiate_req *req) | 407 | static void assemble_neg_contexts(struct smb2_negotiate_req *req, |
408 | unsigned int *total_len) | ||
447 | { | 409 | { |
448 | return; | 410 | return; |
449 | } | 411 | } |
@@ -477,6 +439,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
477 | int blob_offset, blob_length; | 439 | int blob_offset, blob_length; |
478 | char *security_blob; | 440 | char *security_blob; |
479 | int flags = CIFS_NEG_OP; | 441 | int flags = CIFS_NEG_OP; |
442 | unsigned int total_len; | ||
480 | 443 | ||
481 | cifs_dbg(FYI, "Negotiate protocol\n"); | 444 | cifs_dbg(FYI, "Negotiate protocol\n"); |
482 | 445 | ||
@@ -485,30 +448,30 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
485 | return -EIO; | 448 | return -EIO; |
486 | } | 449 | } |
487 | 450 | ||
488 | rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req); | 451 | rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len); |
489 | if (rc) | 452 | if (rc) |
490 | return rc; | 453 | return rc; |
491 | 454 | ||
492 | req->hdr.sync_hdr.SessionId = 0; | 455 | req->sync_hdr.SessionId = 0; |
493 | 456 | ||
494 | if (strcmp(ses->server->vals->version_string, | 457 | if (strcmp(ses->server->vals->version_string, |
495 | SMB3ANY_VERSION_STRING) == 0) { | 458 | SMB3ANY_VERSION_STRING) == 0) { |
496 | req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); | 459 | req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); |
497 | req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); | 460 | req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); |
498 | req->DialectCount = cpu_to_le16(2); | 461 | req->DialectCount = cpu_to_le16(2); |
499 | inc_rfc1001_len(req, 4); | 462 | total_len += 4; |
500 | } else if (strcmp(ses->server->vals->version_string, | 463 | } else if (strcmp(ses->server->vals->version_string, |
501 | SMBDEFAULT_VERSION_STRING) == 0) { | 464 | SMBDEFAULT_VERSION_STRING) == 0) { |
502 | req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); | 465 | req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); |
503 | req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); | 466 | req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); |
504 | req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); | 467 | req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); |
505 | req->DialectCount = cpu_to_le16(3); | 468 | req->DialectCount = cpu_to_le16(3); |
506 | inc_rfc1001_len(req, 6); | 469 | total_len += 6; |
507 | } else { | 470 | } else { |
508 | /* otherwise send specific dialect */ | 471 | /* otherwise send specific dialect */ |
509 | req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); | 472 | req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); |
510 | req->DialectCount = cpu_to_le16(1); | 473 | req->DialectCount = cpu_to_le16(1); |
511 | inc_rfc1001_len(req, 2); | 474 | total_len += 2; |
512 | } | 475 | } |
513 | 476 | ||
514 | /* only one of SMB2 signing flags may be set in SMB2 request */ | 477 | /* only one of SMB2 signing flags may be set in SMB2 request */ |
@@ -528,13 +491,12 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
528 | memcpy(req->ClientGUID, server->client_guid, | 491 | memcpy(req->ClientGUID, server->client_guid, |
529 | SMB2_CLIENT_GUID_SIZE); | 492 | SMB2_CLIENT_GUID_SIZE); |
530 | if (ses->server->vals->protocol_id == SMB311_PROT_ID) | 493 | if (ses->server->vals->protocol_id == SMB311_PROT_ID) |
531 | assemble_neg_contexts(req); | 494 | assemble_neg_contexts(req, &total_len); |
532 | } | 495 | } |
533 | iov[0].iov_base = (char *)req; | 496 | iov[0].iov_base = (char *)req; |
534 | /* 4 for rfc1002 length field */ | 497 | iov[0].iov_len = total_len; |
535 | iov[0].iov_len = get_rfc1002_length(req) + 4; | ||
536 | 498 | ||
537 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); | 499 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
538 | cifs_small_buf_release(req); | 500 | cifs_small_buf_release(req); |
539 | rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; | 501 | rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base; |
540 | /* | 502 | /* |
@@ -654,6 +616,11 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
654 | 616 | ||
655 | cifs_dbg(FYI, "validate negotiate\n"); | 617 | cifs_dbg(FYI, "validate negotiate\n"); |
656 | 618 | ||
619 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
620 | if (tcon->ses->server->rdma) | ||
621 | return 0; | ||
622 | #endif | ||
623 | |||
657 | /* | 624 | /* |
658 | * validation ioctl must be signed, so no point sending this if we | 625 | * validation ioctl must be signed, so no point sending this if we |
659 | * can not sign it (ie are not known user). Even if signing is not | 626 | * can not sign it (ie are not known user). Even if signing is not |
@@ -713,7 +680,6 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
713 | 680 | ||
714 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 681 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
715 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, | 682 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, |
716 | false /* use_ipc */, | ||
717 | (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), | 683 | (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req), |
718 | (char **)&pneg_rsp, &rsplen); | 684 | (char **)&pneg_rsp, &rsplen); |
719 | 685 | ||
@@ -733,8 +699,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
733 | } | 699 | } |
734 | 700 | ||
735 | /* check validate negotiate info response matches what we got earlier */ | 701 | /* check validate negotiate info response matches what we got earlier */ |
736 | if (pneg_rsp->Dialect != | 702 | if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect)) |
737 | cpu_to_le16(tcon->ses->server->vals->protocol_id)) | ||
738 | goto vneg_out; | 703 | goto vneg_out; |
739 | 704 | ||
740 | if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) | 705 | if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) |
@@ -806,20 +771,22 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) | |||
806 | struct cifs_ses *ses = sess_data->ses; | 771 | struct cifs_ses *ses = sess_data->ses; |
807 | struct smb2_sess_setup_req *req; | 772 | struct smb2_sess_setup_req *req; |
808 | struct TCP_Server_Info *server = ses->server; | 773 | struct TCP_Server_Info *server = ses->server; |
774 | unsigned int total_len; | ||
809 | 775 | ||
810 | rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req); | 776 | rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req, |
777 | &total_len); | ||
811 | if (rc) | 778 | if (rc) |
812 | return rc; | 779 | return rc; |
813 | 780 | ||
814 | /* First session, not a reauthenticate */ | 781 | /* First session, not a reauthenticate */ |
815 | req->hdr.sync_hdr.SessionId = 0; | 782 | req->sync_hdr.SessionId = 0; |
816 | 783 | ||
817 | /* if reconnect, we need to send previous sess id, otherwise it is 0 */ | 784 | /* if reconnect, we need to send previous sess id, otherwise it is 0 */ |
818 | req->PreviousSessionId = sess_data->previous_session; | 785 | req->PreviousSessionId = sess_data->previous_session; |
819 | 786 | ||
820 | req->Flags = 0; /* MBZ */ | 787 | req->Flags = 0; /* MBZ */ |
821 | /* to enable echos and oplocks */ | 788 | /* to enable echos and oplocks */ |
822 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(3); | 789 | req->sync_hdr.CreditRequest = cpu_to_le16(3); |
823 | 790 | ||
824 | /* only one of SMB2 signing flags may be set in SMB2 request */ | 791 | /* only one of SMB2 signing flags may be set in SMB2 request */ |
825 | if (server->sign) | 792 | if (server->sign) |
@@ -833,8 +800,8 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) | |||
833 | req->Channel = 0; /* MBZ */ | 800 | req->Channel = 0; /* MBZ */ |
834 | 801 | ||
835 | sess_data->iov[0].iov_base = (char *)req; | 802 | sess_data->iov[0].iov_base = (char *)req; |
836 | /* 4 for rfc1002 length field and 1 for pad */ | 803 | /* 1 for pad */ |
837 | sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; | 804 | sess_data->iov[0].iov_len = total_len - 1; |
838 | /* | 805 | /* |
839 | * This variable will be used to clear the buffer | 806 | * This variable will be used to clear the buffer |
840 | * allocated above in case of any error in the calling function. | 807 | * allocated above in case of any error in the calling function. |
@@ -860,18 +827,15 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) | |||
860 | 827 | ||
861 | /* Testing shows that buffer offset must be at location of Buffer[0] */ | 828 | /* Testing shows that buffer offset must be at location of Buffer[0] */ |
862 | req->SecurityBufferOffset = | 829 | req->SecurityBufferOffset = |
863 | cpu_to_le16(sizeof(struct smb2_sess_setup_req) - | 830 | cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */); |
864 | 1 /* pad */ - 4 /* rfc1001 len */); | ||
865 | req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); | 831 | req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len); |
866 | 832 | ||
867 | inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */); | ||
868 | |||
869 | /* BB add code to build os and lm fields */ | 833 | /* BB add code to build os and lm fields */ |
870 | 834 | ||
871 | rc = SendReceive2(sess_data->xid, sess_data->ses, | 835 | rc = smb2_send_recv(sess_data->xid, sess_data->ses, |
872 | sess_data->iov, 2, | 836 | sess_data->iov, 2, |
873 | &sess_data->buf0_type, | 837 | &sess_data->buf0_type, |
874 | CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov); | 838 | CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov); |
875 | cifs_small_buf_release(sess_data->iov[0].iov_base); | 839 | cifs_small_buf_release(sess_data->iov[0].iov_base); |
876 | memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); | 840 | memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); |
877 | 841 | ||
@@ -1092,7 +1056,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) | |||
1092 | goto out; | 1056 | goto out; |
1093 | 1057 | ||
1094 | req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; | 1058 | req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; |
1095 | req->hdr.sync_hdr.SessionId = ses->Suid; | 1059 | req->sync_hdr.SessionId = ses->Suid; |
1096 | 1060 | ||
1097 | rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, | 1061 | rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, |
1098 | sess_data->nls_cp); | 1062 | sess_data->nls_cp); |
@@ -1202,6 +1166,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) | |||
1202 | int rc = 0; | 1166 | int rc = 0; |
1203 | struct TCP_Server_Info *server; | 1167 | struct TCP_Server_Info *server; |
1204 | int flags = 0; | 1168 | int flags = 0; |
1169 | unsigned int total_len; | ||
1170 | struct kvec iov[1]; | ||
1171 | struct kvec rsp_iov; | ||
1172 | int resp_buf_type; | ||
1205 | 1173 | ||
1206 | cifs_dbg(FYI, "disconnect session %p\n", ses); | 1174 | cifs_dbg(FYI, "disconnect session %p\n", ses); |
1207 | 1175 | ||
@@ -1214,19 +1182,24 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses) | |||
1214 | if (ses->need_reconnect) | 1182 | if (ses->need_reconnect) |
1215 | goto smb2_session_already_dead; | 1183 | goto smb2_session_already_dead; |
1216 | 1184 | ||
1217 | rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req); | 1185 | rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len); |
1218 | if (rc) | 1186 | if (rc) |
1219 | return rc; | 1187 | return rc; |
1220 | 1188 | ||
1221 | /* since no tcon, smb2_init can not do this, so do here */ | 1189 | /* since no tcon, smb2_init can not do this, so do here */ |
1222 | req->hdr.sync_hdr.SessionId = ses->Suid; | 1190 | req->sync_hdr.SessionId = ses->Suid; |
1223 | 1191 | ||
1224 | if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) | 1192 | if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) |
1225 | flags |= CIFS_TRANSFORM_REQ; | 1193 | flags |= CIFS_TRANSFORM_REQ; |
1226 | else if (server->sign) | 1194 | else if (server->sign) |
1227 | req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; | 1195 | req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED; |
1196 | |||
1197 | flags |= CIFS_NO_RESP; | ||
1198 | |||
1199 | iov[0].iov_base = (char *)req; | ||
1200 | iov[0].iov_len = total_len; | ||
1228 | 1201 | ||
1229 | rc = SendReceiveNoRsp(xid, ses, (char *) req, flags); | 1202 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); |
1230 | cifs_small_buf_release(req); | 1203 | cifs_small_buf_release(req); |
1231 | /* | 1204 | /* |
1232 | * No tcon so can't do | 1205 | * No tcon so can't do |
@@ -1265,6 +1238,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
1265 | int unc_path_len; | 1238 | int unc_path_len; |
1266 | __le16 *unc_path = NULL; | 1239 | __le16 *unc_path = NULL; |
1267 | int flags = 0; | 1240 | int flags = 0; |
1241 | unsigned int total_len; | ||
1268 | 1242 | ||
1269 | cifs_dbg(FYI, "TCON\n"); | 1243 | cifs_dbg(FYI, "TCON\n"); |
1270 | 1244 | ||
@@ -1283,40 +1257,30 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
1283 | } | 1257 | } |
1284 | 1258 | ||
1285 | /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ | 1259 | /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ |
1286 | if (tcon) | 1260 | tcon->tid = 0; |
1287 | tcon->tid = 0; | ||
1288 | 1261 | ||
1289 | rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); | 1262 | rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req, |
1263 | &total_len); | ||
1290 | if (rc) { | 1264 | if (rc) { |
1291 | kfree(unc_path); | 1265 | kfree(unc_path); |
1292 | return rc; | 1266 | return rc; |
1293 | } | 1267 | } |
1294 | 1268 | ||
1295 | if (tcon == NULL) { | 1269 | if (encryption_required(tcon)) |
1296 | if ((ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)) | ||
1297 | flags |= CIFS_TRANSFORM_REQ; | ||
1298 | |||
1299 | /* since no tcon, smb2_init can not do this, so do here */ | ||
1300 | req->hdr.sync_hdr.SessionId = ses->Suid; | ||
1301 | if (ses->server->sign) | ||
1302 | req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; | ||
1303 | } else if (encryption_required(tcon)) | ||
1304 | flags |= CIFS_TRANSFORM_REQ; | 1270 | flags |= CIFS_TRANSFORM_REQ; |
1305 | 1271 | ||
1306 | iov[0].iov_base = (char *)req; | 1272 | iov[0].iov_base = (char *)req; |
1307 | /* 4 for rfc1002 length field and 1 for pad */ | 1273 | /* 1 for pad */ |
1308 | iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; | 1274 | iov[0].iov_len = total_len - 1; |
1309 | 1275 | ||
1310 | /* Testing shows that buffer offset must be at location of Buffer[0] */ | 1276 | /* Testing shows that buffer offset must be at location of Buffer[0] */ |
1311 | req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req) | 1277 | req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req) |
1312 | - 1 /* pad */ - 4 /* do not count rfc1001 len field */); | 1278 | - 1 /* pad */); |
1313 | req->PathLength = cpu_to_le16(unc_path_len - 2); | 1279 | req->PathLength = cpu_to_le16(unc_path_len - 2); |
1314 | iov[1].iov_base = unc_path; | 1280 | iov[1].iov_base = unc_path; |
1315 | iov[1].iov_len = unc_path_len; | 1281 | iov[1].iov_len = unc_path_len; |
1316 | 1282 | ||
1317 | inc_rfc1001_len(req, unc_path_len - 1 /* pad */); | 1283 | rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); |
1318 | |||
1319 | rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); | ||
1320 | cifs_small_buf_release(req); | 1284 | cifs_small_buf_release(req); |
1321 | rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; | 1285 | rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; |
1322 | 1286 | ||
@@ -1328,21 +1292,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
1328 | goto tcon_error_exit; | 1292 | goto tcon_error_exit; |
1329 | } | 1293 | } |
1330 | 1294 | ||
1331 | if (tcon == NULL) { | ||
1332 | ses->ipc_tid = rsp->hdr.sync_hdr.TreeId; | ||
1333 | goto tcon_exit; | ||
1334 | } | ||
1335 | |||
1336 | switch (rsp->ShareType) { | 1295 | switch (rsp->ShareType) { |
1337 | case SMB2_SHARE_TYPE_DISK: | 1296 | case SMB2_SHARE_TYPE_DISK: |
1338 | cifs_dbg(FYI, "connection to disk share\n"); | 1297 | cifs_dbg(FYI, "connection to disk share\n"); |
1339 | break; | 1298 | break; |
1340 | case SMB2_SHARE_TYPE_PIPE: | 1299 | case SMB2_SHARE_TYPE_PIPE: |
1341 | tcon->ipc = true; | 1300 | tcon->pipe = true; |
1342 | cifs_dbg(FYI, "connection to pipe share\n"); | 1301 | cifs_dbg(FYI, "connection to pipe share\n"); |
1343 | break; | 1302 | break; |
1344 | case SMB2_SHARE_TYPE_PRINT: | 1303 | case SMB2_SHARE_TYPE_PRINT: |
1345 | tcon->ipc = true; | 1304 | tcon->print = true; |
1346 | cifs_dbg(FYI, "connection to printer\n"); | 1305 | cifs_dbg(FYI, "connection to printer\n"); |
1347 | break; | 1306 | break; |
1348 | default: | 1307 | default: |
@@ -1389,6 +1348,10 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) | |||
1389 | int rc = 0; | 1348 | int rc = 0; |
1390 | struct cifs_ses *ses = tcon->ses; | 1349 | struct cifs_ses *ses = tcon->ses; |
1391 | int flags = 0; | 1350 | int flags = 0; |
1351 | unsigned int total_len; | ||
1352 | struct kvec iov[1]; | ||
1353 | struct kvec rsp_iov; | ||
1354 | int resp_buf_type; | ||
1392 | 1355 | ||
1393 | cifs_dbg(FYI, "Tree Disconnect\n"); | 1356 | cifs_dbg(FYI, "Tree Disconnect\n"); |
1394 | 1357 | ||
@@ -1398,14 +1361,20 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon) | |||
1398 | if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) | 1361 | if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) |
1399 | return 0; | 1362 | return 0; |
1400 | 1363 | ||
1401 | rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req); | 1364 | rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req, |
1365 | &total_len); | ||
1402 | if (rc) | 1366 | if (rc) |
1403 | return rc; | 1367 | return rc; |
1404 | 1368 | ||
1405 | if (encryption_required(tcon)) | 1369 | if (encryption_required(tcon)) |
1406 | flags |= CIFS_TRANSFORM_REQ; | 1370 | flags |= CIFS_TRANSFORM_REQ; |
1407 | 1371 | ||
1408 | rc = SendReceiveNoRsp(xid, ses, (char *)req, flags); | 1372 | flags |= CIFS_NO_RESP; |
1373 | |||
1374 | iov[0].iov_base = (char *)req; | ||
1375 | iov[0].iov_len = total_len; | ||
1376 | |||
1377 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); | ||
1409 | cifs_small_buf_release(req); | 1378 | cifs_small_buf_release(req); |
1410 | if (rc) | 1379 | if (rc) |
1411 | cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); | 1380 | cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE); |
@@ -1505,11 +1474,10 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov, | |||
1505 | req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; | 1474 | req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; |
1506 | if (!req->CreateContextsOffset) | 1475 | if (!req->CreateContextsOffset) |
1507 | req->CreateContextsOffset = cpu_to_le32( | 1476 | req->CreateContextsOffset = cpu_to_le32( |
1508 | sizeof(struct smb2_create_req) - 4 + | 1477 | sizeof(struct smb2_create_req) + |
1509 | iov[num - 1].iov_len); | 1478 | iov[num - 1].iov_len); |
1510 | le32_add_cpu(&req->CreateContextsLength, | 1479 | le32_add_cpu(&req->CreateContextsLength, |
1511 | server->vals->create_lease_size); | 1480 | server->vals->create_lease_size); |
1512 | inc_rfc1001_len(&req->hdr, server->vals->create_lease_size); | ||
1513 | *num_iovec = num + 1; | 1481 | *num_iovec = num + 1; |
1514 | return 0; | 1482 | return 0; |
1515 | } | 1483 | } |
@@ -1589,10 +1557,9 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec, | |||
1589 | iov[num].iov_len = sizeof(struct create_durable_v2); | 1557 | iov[num].iov_len = sizeof(struct create_durable_v2); |
1590 | if (!req->CreateContextsOffset) | 1558 | if (!req->CreateContextsOffset) |
1591 | req->CreateContextsOffset = | 1559 | req->CreateContextsOffset = |
1592 | cpu_to_le32(sizeof(struct smb2_create_req) - 4 + | 1560 | cpu_to_le32(sizeof(struct smb2_create_req) + |
1593 | iov[1].iov_len); | 1561 | iov[1].iov_len); |
1594 | le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2)); | 1562 | le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2)); |
1595 | inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2)); | ||
1596 | *num_iovec = num + 1; | 1563 | *num_iovec = num + 1; |
1597 | return 0; | 1564 | return 0; |
1598 | } | 1565 | } |
@@ -1613,12 +1580,10 @@ add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec, | |||
1613 | iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); | 1580 | iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2); |
1614 | if (!req->CreateContextsOffset) | 1581 | if (!req->CreateContextsOffset) |
1615 | req->CreateContextsOffset = | 1582 | req->CreateContextsOffset = |
1616 | cpu_to_le32(sizeof(struct smb2_create_req) - 4 + | 1583 | cpu_to_le32(sizeof(struct smb2_create_req) + |
1617 | iov[1].iov_len); | 1584 | iov[1].iov_len); |
1618 | le32_add_cpu(&req->CreateContextsLength, | 1585 | le32_add_cpu(&req->CreateContextsLength, |
1619 | sizeof(struct create_durable_handle_reconnect_v2)); | 1586 | sizeof(struct create_durable_handle_reconnect_v2)); |
1620 | inc_rfc1001_len(&req->hdr, | ||
1621 | sizeof(struct create_durable_handle_reconnect_v2)); | ||
1622 | *num_iovec = num + 1; | 1587 | *num_iovec = num + 1; |
1623 | return 0; | 1588 | return 0; |
1624 | } | 1589 | } |
@@ -1649,10 +1614,9 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec, | |||
1649 | iov[num].iov_len = sizeof(struct create_durable); | 1614 | iov[num].iov_len = sizeof(struct create_durable); |
1650 | if (!req->CreateContextsOffset) | 1615 | if (!req->CreateContextsOffset) |
1651 | req->CreateContextsOffset = | 1616 | req->CreateContextsOffset = |
1652 | cpu_to_le32(sizeof(struct smb2_create_req) - 4 + | 1617 | cpu_to_le32(sizeof(struct smb2_create_req) + |
1653 | iov[1].iov_len); | 1618 | iov[1].iov_len); |
1654 | le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable)); | 1619 | le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable)); |
1655 | inc_rfc1001_len(&req->hdr, sizeof(struct create_durable)); | ||
1656 | *num_iovec = num + 1; | 1620 | *num_iovec = num + 1; |
1657 | return 0; | 1621 | return 0; |
1658 | } | 1622 | } |
@@ -1723,6 +1687,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1723 | __u32 file_attributes = 0; | 1687 | __u32 file_attributes = 0; |
1724 | char *dhc_buf = NULL, *lc_buf = NULL; | 1688 | char *dhc_buf = NULL, *lc_buf = NULL; |
1725 | int flags = 0; | 1689 | int flags = 0; |
1690 | unsigned int total_len; | ||
1726 | 1691 | ||
1727 | cifs_dbg(FYI, "create/open\n"); | 1692 | cifs_dbg(FYI, "create/open\n"); |
1728 | 1693 | ||
@@ -1731,7 +1696,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1731 | else | 1696 | else |
1732 | return -EIO; | 1697 | return -EIO; |
1733 | 1698 | ||
1734 | rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req); | 1699 | rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len); |
1700 | |||
1735 | if (rc) | 1701 | if (rc) |
1736 | return rc; | 1702 | return rc; |
1737 | 1703 | ||
@@ -1752,12 +1718,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1752 | req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); | 1718 | req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); |
1753 | 1719 | ||
1754 | iov[0].iov_base = (char *)req; | 1720 | iov[0].iov_base = (char *)req; |
1755 | /* 4 for rfc1002 length field */ | ||
1756 | iov[0].iov_len = get_rfc1002_length(req) + 4; | ||
1757 | /* -1 since last byte is buf[0] which is sent below (path) */ | 1721 | /* -1 since last byte is buf[0] which is sent below (path) */ |
1758 | iov[0].iov_len--; | 1722 | iov[0].iov_len = total_len - 1; |
1759 | 1723 | ||
1760 | req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); | 1724 | req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); |
1761 | 1725 | ||
1762 | /* [MS-SMB2] 2.2.13 NameOffset: | 1726 | /* [MS-SMB2] 2.2.13 NameOffset: |
1763 | * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of | 1727 | * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of |
@@ -1770,7 +1734,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1770 | if (tcon->share_flags & SHI1005_FLAGS_DFS) { | 1734 | if (tcon->share_flags & SHI1005_FLAGS_DFS) { |
1771 | int name_len; | 1735 | int name_len; |
1772 | 1736 | ||
1773 | req->hdr.sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; | 1737 | req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS; |
1774 | rc = alloc_path_with_tree_prefix(©_path, ©_size, | 1738 | rc = alloc_path_with_tree_prefix(©_path, ©_size, |
1775 | &name_len, | 1739 | &name_len, |
1776 | tcon->treeName, path); | 1740 | tcon->treeName, path); |
@@ -1797,8 +1761,6 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1797 | 1761 | ||
1798 | iov[1].iov_len = uni_path_len; | 1762 | iov[1].iov_len = uni_path_len; |
1799 | iov[1].iov_base = path; | 1763 | iov[1].iov_base = path; |
1800 | /* -1 since last byte is buf[0] which was counted in smb2_buf_len */ | ||
1801 | inc_rfc1001_len(req, uni_path_len - 1); | ||
1802 | 1764 | ||
1803 | if (!server->oplocks) | 1765 | if (!server->oplocks) |
1804 | *oplock = SMB2_OPLOCK_LEVEL_NONE; | 1766 | *oplock = SMB2_OPLOCK_LEVEL_NONE; |
@@ -1836,7 +1798,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1836 | dhc_buf = iov[n_iov-1].iov_base; | 1798 | dhc_buf = iov[n_iov-1].iov_base; |
1837 | } | 1799 | } |
1838 | 1800 | ||
1839 | rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); | 1801 | rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags, |
1802 | &rsp_iov); | ||
1840 | cifs_small_buf_release(req); | 1803 | cifs_small_buf_release(req); |
1841 | rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; | 1804 | rsp = (struct smb2_create_rsp *)rsp_iov.iov_base; |
1842 | 1805 | ||
@@ -1877,7 +1840,7 @@ creat_exit: | |||
1877 | */ | 1840 | */ |
1878 | int | 1841 | int |
1879 | SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | 1842 | SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, |
1880 | u64 volatile_fid, u32 opcode, bool is_fsctl, bool use_ipc, | 1843 | u64 volatile_fid, u32 opcode, bool is_fsctl, |
1881 | char *in_data, u32 indatalen, | 1844 | char *in_data, u32 indatalen, |
1882 | char **out_data, u32 *plen /* returned data len */) | 1845 | char **out_data, u32 *plen /* returned data len */) |
1883 | { | 1846 | { |
@@ -1891,6 +1854,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1891 | int n_iov; | 1854 | int n_iov; |
1892 | int rc = 0; | 1855 | int rc = 0; |
1893 | int flags = 0; | 1856 | int flags = 0; |
1857 | unsigned int total_len; | ||
1894 | 1858 | ||
1895 | cifs_dbg(FYI, "SMB2 IOCTL\n"); | 1859 | cifs_dbg(FYI, "SMB2 IOCTL\n"); |
1896 | 1860 | ||
@@ -1909,20 +1873,10 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1909 | if (!ses || !(ses->server)) | 1873 | if (!ses || !(ses->server)) |
1910 | return -EIO; | 1874 | return -EIO; |
1911 | 1875 | ||
1912 | rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req); | 1876 | rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len); |
1913 | if (rc) | 1877 | if (rc) |
1914 | return rc; | 1878 | return rc; |
1915 | 1879 | ||
1916 | if (use_ipc) { | ||
1917 | if (ses->ipc_tid == 0) { | ||
1918 | cifs_small_buf_release(req); | ||
1919 | return -ENOTCONN; | ||
1920 | } | ||
1921 | |||
1922 | cifs_dbg(FYI, "replacing tid 0x%x with IPC tid 0x%x\n", | ||
1923 | req->hdr.sync_hdr.TreeId, ses->ipc_tid); | ||
1924 | req->hdr.sync_hdr.TreeId = ses->ipc_tid; | ||
1925 | } | ||
1926 | if (encryption_required(tcon)) | 1880 | if (encryption_required(tcon)) |
1927 | flags |= CIFS_TRANSFORM_REQ; | 1881 | flags |= CIFS_TRANSFORM_REQ; |
1928 | 1882 | ||
@@ -1934,7 +1888,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1934 | req->InputCount = cpu_to_le32(indatalen); | 1888 | req->InputCount = cpu_to_le32(indatalen); |
1935 | /* do not set InputOffset if no input data */ | 1889 | /* do not set InputOffset if no input data */ |
1936 | req->InputOffset = | 1890 | req->InputOffset = |
1937 | cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4); | 1891 | cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer)); |
1938 | iov[1].iov_base = in_data; | 1892 | iov[1].iov_base = in_data; |
1939 | iov[1].iov_len = indatalen; | 1893 | iov[1].iov_len = indatalen; |
1940 | n_iov = 2; | 1894 | n_iov = 2; |
@@ -1969,21 +1923,20 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1969 | * but if input data passed to ioctl, we do not | 1923 | * but if input data passed to ioctl, we do not |
1970 | * want to double count this, so we do not send | 1924 | * want to double count this, so we do not send |
1971 | * the dummy one byte of data in iovec[0] if sending | 1925 | * the dummy one byte of data in iovec[0] if sending |
1972 | * input data (in iovec[1]). We also must add 4 bytes | 1926 | * input data (in iovec[1]). |
1973 | * in first iovec to allow for rfc1002 length field. | ||
1974 | */ | 1927 | */ |
1975 | 1928 | ||
1976 | if (indatalen) { | 1929 | if (indatalen) { |
1977 | iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; | 1930 | iov[0].iov_len = total_len - 1; |
1978 | inc_rfc1001_len(req, indatalen - 1); | ||
1979 | } else | 1931 | } else |
1980 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 1932 | iov[0].iov_len = total_len; |
1981 | 1933 | ||
1982 | /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ | 1934 | /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ |
1983 | if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) | 1935 | if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) |
1984 | req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; | 1936 | req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED; |
1985 | 1937 | ||
1986 | rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); | 1938 | rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags, |
1939 | &rsp_iov); | ||
1987 | cifs_small_buf_release(req); | 1940 | cifs_small_buf_release(req); |
1988 | rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; | 1941 | rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base; |
1989 | 1942 | ||
@@ -2052,7 +2005,6 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, | |||
2052 | 2005 | ||
2053 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, | 2006 | rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, |
2054 | FSCTL_SET_COMPRESSION, true /* is_fsctl */, | 2007 | FSCTL_SET_COMPRESSION, true /* is_fsctl */, |
2055 | false /* use_ipc */, | ||
2056 | (char *)&fsctl_input /* data input */, | 2008 | (char *)&fsctl_input /* data input */, |
2057 | 2 /* in data len */, &ret_data /* out data */, NULL); | 2009 | 2 /* in data len */, &ret_data /* out data */, NULL); |
2058 | 2010 | ||
@@ -2073,13 +2025,14 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
2073 | int resp_buftype; | 2025 | int resp_buftype; |
2074 | int rc = 0; | 2026 | int rc = 0; |
2075 | int flags = 0; | 2027 | int flags = 0; |
2028 | unsigned int total_len; | ||
2076 | 2029 | ||
2077 | cifs_dbg(FYI, "Close\n"); | 2030 | cifs_dbg(FYI, "Close\n"); |
2078 | 2031 | ||
2079 | if (!ses || !(ses->server)) | 2032 | if (!ses || !(ses->server)) |
2080 | return -EIO; | 2033 | return -EIO; |
2081 | 2034 | ||
2082 | rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req); | 2035 | rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len); |
2083 | if (rc) | 2036 | if (rc) |
2084 | return rc; | 2037 | return rc; |
2085 | 2038 | ||
@@ -2090,10 +2043,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
2090 | req->VolatileFileId = volatile_fid; | 2043 | req->VolatileFileId = volatile_fid; |
2091 | 2044 | ||
2092 | iov[0].iov_base = (char *)req; | 2045 | iov[0].iov_base = (char *)req; |
2093 | /* 4 for rfc1002 length field */ | 2046 | iov[0].iov_len = total_len; |
2094 | iov[0].iov_len = get_rfc1002_length(req) + 4; | ||
2095 | 2047 | ||
2096 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); | 2048 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
2097 | cifs_small_buf_release(req); | 2049 | cifs_small_buf_release(req); |
2098 | rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; | 2050 | rsp = (struct smb2_close_rsp *)rsp_iov.iov_base; |
2099 | 2051 | ||
@@ -2180,13 +2132,15 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
2180 | int resp_buftype; | 2132 | int resp_buftype; |
2181 | struct cifs_ses *ses = tcon->ses; | 2133 | struct cifs_ses *ses = tcon->ses; |
2182 | int flags = 0; | 2134 | int flags = 0; |
2135 | unsigned int total_len; | ||
2183 | 2136 | ||
2184 | cifs_dbg(FYI, "Query Info\n"); | 2137 | cifs_dbg(FYI, "Query Info\n"); |
2185 | 2138 | ||
2186 | if (!ses || !(ses->server)) | 2139 | if (!ses || !(ses->server)) |
2187 | return -EIO; | 2140 | return -EIO; |
2188 | 2141 | ||
2189 | rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); | 2142 | rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req, |
2143 | &total_len); | ||
2190 | if (rc) | 2144 | if (rc) |
2191 | return rc; | 2145 | return rc; |
2192 | 2146 | ||
@@ -2203,15 +2157,14 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
2203 | * We do not use the input buffer (do not send extra byte) | 2157 | * We do not use the input buffer (do not send extra byte) |
2204 | */ | 2158 | */ |
2205 | req->InputBufferOffset = 0; | 2159 | req->InputBufferOffset = 0; |
2206 | inc_rfc1001_len(req, -1); | ||
2207 | 2160 | ||
2208 | req->OutputBufferLength = cpu_to_le32(output_len); | 2161 | req->OutputBufferLength = cpu_to_le32(output_len); |
2209 | 2162 | ||
2210 | iov[0].iov_base = (char *)req; | 2163 | iov[0].iov_base = (char *)req; |
2211 | /* 4 for rfc1002 length field */ | 2164 | /* 1 for Buffer */ |
2212 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 2165 | iov[0].iov_len = total_len - 1; |
2213 | 2166 | ||
2214 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); | 2167 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
2215 | cifs_small_buf_release(req); | 2168 | cifs_small_buf_release(req); |
2216 | rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; | 2169 | rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base; |
2217 | 2170 | ||
@@ -2338,6 +2291,10 @@ void smb2_reconnect_server(struct work_struct *work) | |||
2338 | tcon_exist = true; | 2291 | tcon_exist = true; |
2339 | } | 2292 | } |
2340 | } | 2293 | } |
2294 | if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { | ||
2295 | list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); | ||
2296 | tcon_exist = true; | ||
2297 | } | ||
2341 | } | 2298 | } |
2342 | /* | 2299 | /* |
2343 | * Get the reference to server struct to be sure that the last call of | 2300 | * Get the reference to server struct to be sure that the last call of |
@@ -2376,6 +2333,8 @@ SMB2_echo(struct TCP_Server_Info *server) | |||
2376 | struct kvec iov[2]; | 2333 | struct kvec iov[2]; |
2377 | struct smb_rqst rqst = { .rq_iov = iov, | 2334 | struct smb_rqst rqst = { .rq_iov = iov, |
2378 | .rq_nvec = 2 }; | 2335 | .rq_nvec = 2 }; |
2336 | unsigned int total_len; | ||
2337 | __be32 rfc1002_marker; | ||
2379 | 2338 | ||
2380 | cifs_dbg(FYI, "In echo request\n"); | 2339 | cifs_dbg(FYI, "In echo request\n"); |
2381 | 2340 | ||
@@ -2385,17 +2344,17 @@ SMB2_echo(struct TCP_Server_Info *server) | |||
2385 | return rc; | 2344 | return rc; |
2386 | } | 2345 | } |
2387 | 2346 | ||
2388 | rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); | 2347 | rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len); |
2389 | if (rc) | 2348 | if (rc) |
2390 | return rc; | 2349 | return rc; |
2391 | 2350 | ||
2392 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); | 2351 | req->sync_hdr.CreditRequest = cpu_to_le16(1); |
2393 | 2352 | ||
2394 | /* 4 for rfc1002 length field */ | ||
2395 | iov[0].iov_len = 4; | 2353 | iov[0].iov_len = 4; |
2396 | iov[0].iov_base = (char *)req; | 2354 | rfc1002_marker = cpu_to_be32(total_len); |
2397 | iov[1].iov_len = get_rfc1002_length(req); | 2355 | iov[0].iov_base = &rfc1002_marker; |
2398 | iov[1].iov_base = (char *)req + 4; | 2356 | iov[1].iov_len = total_len; |
2357 | iov[1].iov_base = (char *)req; | ||
2399 | 2358 | ||
2400 | rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, | 2359 | rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL, |
2401 | server, CIFS_ECHO_OP); | 2360 | server, CIFS_ECHO_OP); |
@@ -2417,13 +2376,14 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
2417 | int resp_buftype; | 2376 | int resp_buftype; |
2418 | int rc = 0; | 2377 | int rc = 0; |
2419 | int flags = 0; | 2378 | int flags = 0; |
2379 | unsigned int total_len; | ||
2420 | 2380 | ||
2421 | cifs_dbg(FYI, "Flush\n"); | 2381 | cifs_dbg(FYI, "Flush\n"); |
2422 | 2382 | ||
2423 | if (!ses || !(ses->server)) | 2383 | if (!ses || !(ses->server)) |
2424 | return -EIO; | 2384 | return -EIO; |
2425 | 2385 | ||
2426 | rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req); | 2386 | rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len); |
2427 | if (rc) | 2387 | if (rc) |
2428 | return rc; | 2388 | return rc; |
2429 | 2389 | ||
@@ -2434,10 +2394,9 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
2434 | req->VolatileFileId = volatile_fid; | 2394 | req->VolatileFileId = volatile_fid; |
2435 | 2395 | ||
2436 | iov[0].iov_base = (char *)req; | 2396 | iov[0].iov_base = (char *)req; |
2437 | /* 4 for rfc1002 length field */ | 2397 | iov[0].iov_len = total_len; |
2438 | iov[0].iov_len = get_rfc1002_length(req) + 4; | ||
2439 | 2398 | ||
2440 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); | 2399 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
2441 | cifs_small_buf_release(req); | 2400 | cifs_small_buf_release(req); |
2442 | 2401 | ||
2443 | if (rc != 0) | 2402 | if (rc != 0) |
@@ -2453,18 +2412,21 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
2453 | */ | 2412 | */ |
2454 | static int | 2413 | static int |
2455 | smb2_new_read_req(void **buf, unsigned int *total_len, | 2414 | smb2_new_read_req(void **buf, unsigned int *total_len, |
2456 | struct cifs_io_parms *io_parms, unsigned int remaining_bytes, | 2415 | struct cifs_io_parms *io_parms, struct cifs_readdata *rdata, |
2457 | int request_type) | 2416 | unsigned int remaining_bytes, int request_type) |
2458 | { | 2417 | { |
2459 | int rc = -EACCES; | 2418 | int rc = -EACCES; |
2460 | struct smb2_read_plain_req *req = NULL; | 2419 | struct smb2_read_plain_req *req = NULL; |
2461 | struct smb2_sync_hdr *shdr; | 2420 | struct smb2_sync_hdr *shdr; |
2421 | struct TCP_Server_Info *server; | ||
2462 | 2422 | ||
2463 | rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req, | 2423 | rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req, |
2464 | total_len); | 2424 | total_len); |
2465 | if (rc) | 2425 | if (rc) |
2466 | return rc; | 2426 | return rc; |
2467 | if (io_parms->tcon->ses->server == NULL) | 2427 | |
2428 | server = io_parms->tcon->ses->server; | ||
2429 | if (server == NULL) | ||
2468 | return -ECONNABORTED; | 2430 | return -ECONNABORTED; |
2469 | 2431 | ||
2470 | shdr = &req->sync_hdr; | 2432 | shdr = &req->sync_hdr; |
@@ -2478,7 +2440,40 @@ smb2_new_read_req(void **buf, unsigned int *total_len, | |||
2478 | req->MinimumCount = 0; | 2440 | req->MinimumCount = 0; |
2479 | req->Length = cpu_to_le32(io_parms->length); | 2441 | req->Length = cpu_to_le32(io_parms->length); |
2480 | req->Offset = cpu_to_le64(io_parms->offset); | 2442 | req->Offset = cpu_to_le64(io_parms->offset); |
2481 | 2443 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
2444 | /* | ||
2445 | * If we want to do a RDMA write, fill in and append | ||
2446 | * smbd_buffer_descriptor_v1 to the end of read request | ||
2447 | */ | ||
2448 | if (server->rdma && rdata && | ||
2449 | rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { | ||
2450 | |||
2451 | struct smbd_buffer_descriptor_v1 *v1; | ||
2452 | bool need_invalidate = | ||
2453 | io_parms->tcon->ses->server->dialect == SMB30_PROT_ID; | ||
2454 | |||
2455 | rdata->mr = smbd_register_mr( | ||
2456 | server->smbd_conn, rdata->pages, | ||
2457 | rdata->nr_pages, rdata->tailsz, | ||
2458 | true, need_invalidate); | ||
2459 | if (!rdata->mr) | ||
2460 | return -ENOBUFS; | ||
2461 | |||
2462 | req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; | ||
2463 | if (need_invalidate) | ||
2464 | req->Channel = SMB2_CHANNEL_RDMA_V1; | ||
2465 | req->ReadChannelInfoOffset = | ||
2466 | cpu_to_le16(offsetof(struct smb2_read_plain_req, Buffer)); | ||
2467 | req->ReadChannelInfoLength = | ||
2468 | cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); | ||
2469 | v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; | ||
2470 | v1->offset = cpu_to_le64(rdata->mr->mr->iova); | ||
2471 | v1->token = cpu_to_le32(rdata->mr->mr->rkey); | ||
2472 | v1->length = cpu_to_le32(rdata->mr->mr->length); | ||
2473 | |||
2474 | *total_len += sizeof(*v1) - 1; | ||
2475 | } | ||
2476 | #endif | ||
2482 | if (request_type & CHAINED_REQUEST) { | 2477 | if (request_type & CHAINED_REQUEST) { |
2483 | if (!(request_type & END_OF_CHAIN)) { | 2478 | if (!(request_type & END_OF_CHAIN)) { |
2484 | /* next 8-byte aligned request */ | 2479 | /* next 8-byte aligned request */ |
@@ -2557,7 +2552,17 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
2557 | if (rdata->result != -ENODATA) | 2552 | if (rdata->result != -ENODATA) |
2558 | rdata->result = -EIO; | 2553 | rdata->result = -EIO; |
2559 | } | 2554 | } |
2560 | 2555 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
2556 | /* | ||
2557 | * If this rdata has a memmory registered, the MR can be freed | ||
2558 | * MR needs to be freed as soon as I/O finishes to prevent deadlock | ||
2559 | * because they have limited number and are used for future I/Os | ||
2560 | */ | ||
2561 | if (rdata->mr) { | ||
2562 | smbd_deregister_mr(rdata->mr); | ||
2563 | rdata->mr = NULL; | ||
2564 | } | ||
2565 | #endif | ||
2561 | if (rdata->result) | 2566 | if (rdata->result) |
2562 | cifs_stats_fail_inc(tcon, SMB2_READ_HE); | 2567 | cifs_stats_fail_inc(tcon, SMB2_READ_HE); |
2563 | 2568 | ||
@@ -2592,7 +2597,8 @@ smb2_async_readv(struct cifs_readdata *rdata) | |||
2592 | 2597 | ||
2593 | server = io_parms.tcon->ses->server; | 2598 | server = io_parms.tcon->ses->server; |
2594 | 2599 | ||
2595 | rc = smb2_new_read_req((void **) &buf, &total_len, &io_parms, 0, 0); | 2600 | rc = smb2_new_read_req( |
2601 | (void **) &buf, &total_len, &io_parms, rdata, 0, 0); | ||
2596 | if (rc) { | 2602 | if (rc) { |
2597 | if (rc == -EAGAIN && rdata->credits) { | 2603 | if (rc == -EAGAIN && rdata->credits) { |
2598 | /* credits was reset by reconnect */ | 2604 | /* credits was reset by reconnect */ |
@@ -2650,31 +2656,24 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
2650 | struct smb2_read_plain_req *req = NULL; | 2656 | struct smb2_read_plain_req *req = NULL; |
2651 | struct smb2_read_rsp *rsp = NULL; | 2657 | struct smb2_read_rsp *rsp = NULL; |
2652 | struct smb2_sync_hdr *shdr; | 2658 | struct smb2_sync_hdr *shdr; |
2653 | struct kvec iov[2]; | 2659 | struct kvec iov[1]; |
2654 | struct kvec rsp_iov; | 2660 | struct kvec rsp_iov; |
2655 | unsigned int total_len; | 2661 | unsigned int total_len; |
2656 | __be32 req_len; | ||
2657 | struct smb_rqst rqst = { .rq_iov = iov, | ||
2658 | .rq_nvec = 2 }; | ||
2659 | int flags = CIFS_LOG_ERROR; | 2662 | int flags = CIFS_LOG_ERROR; |
2660 | struct cifs_ses *ses = io_parms->tcon->ses; | 2663 | struct cifs_ses *ses = io_parms->tcon->ses; |
2661 | 2664 | ||
2662 | *nbytes = 0; | 2665 | *nbytes = 0; |
2663 | rc = smb2_new_read_req((void **)&req, &total_len, io_parms, 0, 0); | 2666 | rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0); |
2664 | if (rc) | 2667 | if (rc) |
2665 | return rc; | 2668 | return rc; |
2666 | 2669 | ||
2667 | if (encryption_required(io_parms->tcon)) | 2670 | if (encryption_required(io_parms->tcon)) |
2668 | flags |= CIFS_TRANSFORM_REQ; | 2671 | flags |= CIFS_TRANSFORM_REQ; |
2669 | 2672 | ||
2670 | req_len = cpu_to_be32(total_len); | 2673 | iov[0].iov_base = (char *)req; |
2671 | 2674 | iov[0].iov_len = total_len; | |
2672 | iov[0].iov_base = &req_len; | ||
2673 | iov[0].iov_len = sizeof(__be32); | ||
2674 | iov[1].iov_base = req; | ||
2675 | iov[1].iov_len = total_len; | ||
2676 | 2675 | ||
2677 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); | 2676 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov); |
2678 | cifs_small_buf_release(req); | 2677 | cifs_small_buf_release(req); |
2679 | 2678 | ||
2680 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; | 2679 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; |
@@ -2755,7 +2754,19 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
2755 | wdata->result = -EIO; | 2754 | wdata->result = -EIO; |
2756 | break; | 2755 | break; |
2757 | } | 2756 | } |
2758 | 2757 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
2758 | /* | ||
2759 | * If this wdata has a memory registered, the MR can be freed | ||
2760 | * The number of MRs available is limited, it's important to recover | ||
2761 | * used MR as soon as I/O is finished. Hold MR longer in the later | ||
2762 | * I/O process can possibly result in I/O deadlock due to lack of MR | ||
2763 | * to send request on I/O retry | ||
2764 | */ | ||
2765 | if (wdata->mr) { | ||
2766 | smbd_deregister_mr(wdata->mr); | ||
2767 | wdata->mr = NULL; | ||
2768 | } | ||
2769 | #endif | ||
2759 | if (wdata->result) | 2770 | if (wdata->result) |
2760 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); | 2771 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); |
2761 | 2772 | ||
@@ -2776,8 +2787,10 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
2776 | struct TCP_Server_Info *server = tcon->ses->server; | 2787 | struct TCP_Server_Info *server = tcon->ses->server; |
2777 | struct kvec iov[2]; | 2788 | struct kvec iov[2]; |
2778 | struct smb_rqst rqst = { }; | 2789 | struct smb_rqst rqst = { }; |
2790 | unsigned int total_len; | ||
2791 | __be32 rfc1002_marker; | ||
2779 | 2792 | ||
2780 | rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); | 2793 | rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len); |
2781 | if (rc) { | 2794 | if (rc) { |
2782 | if (rc == -EAGAIN && wdata->credits) { | 2795 | if (rc == -EAGAIN && wdata->credits) { |
2783 | /* credits was reset by reconnect */ | 2796 | /* credits was reset by reconnect */ |
@@ -2793,7 +2806,7 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
2793 | if (encryption_required(tcon)) | 2806 | if (encryption_required(tcon)) |
2794 | flags |= CIFS_TRANSFORM_REQ; | 2807 | flags |= CIFS_TRANSFORM_REQ; |
2795 | 2808 | ||
2796 | shdr = get_sync_hdr(req); | 2809 | shdr = (struct smb2_sync_hdr *)req; |
2797 | shdr->ProcessId = cpu_to_le32(wdata->cfile->pid); | 2810 | shdr->ProcessId = cpu_to_le32(wdata->cfile->pid); |
2798 | 2811 | ||
2799 | req->PersistentFileId = wdata->cfile->fid.persistent_fid; | 2812 | req->PersistentFileId = wdata->cfile->fid.persistent_fid; |
@@ -2802,16 +2815,51 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
2802 | req->WriteChannelInfoLength = 0; | 2815 | req->WriteChannelInfoLength = 0; |
2803 | req->Channel = 0; | 2816 | req->Channel = 0; |
2804 | req->Offset = cpu_to_le64(wdata->offset); | 2817 | req->Offset = cpu_to_le64(wdata->offset); |
2805 | /* 4 for rfc1002 length field */ | ||
2806 | req->DataOffset = cpu_to_le16( | 2818 | req->DataOffset = cpu_to_le16( |
2807 | offsetof(struct smb2_write_req, Buffer) - 4); | 2819 | offsetof(struct smb2_write_req, Buffer)); |
2808 | req->RemainingBytes = 0; | 2820 | req->RemainingBytes = 0; |
2809 | 2821 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
2822 | /* | ||
2823 | * If we want to do a server RDMA read, fill in and append | ||
2824 | * smbd_buffer_descriptor_v1 to the end of write request | ||
2825 | */ | ||
2826 | if (server->rdma && wdata->bytes >= | ||
2827 | server->smbd_conn->rdma_readwrite_threshold) { | ||
2828 | |||
2829 | struct smbd_buffer_descriptor_v1 *v1; | ||
2830 | bool need_invalidate = server->dialect == SMB30_PROT_ID; | ||
2831 | |||
2832 | wdata->mr = smbd_register_mr( | ||
2833 | server->smbd_conn, wdata->pages, | ||
2834 | wdata->nr_pages, wdata->tailsz, | ||
2835 | false, need_invalidate); | ||
2836 | if (!wdata->mr) { | ||
2837 | rc = -ENOBUFS; | ||
2838 | goto async_writev_out; | ||
2839 | } | ||
2840 | req->Length = 0; | ||
2841 | req->DataOffset = 0; | ||
2842 | req->RemainingBytes = | ||
2843 | cpu_to_le32((wdata->nr_pages-1)*PAGE_SIZE + wdata->tailsz); | ||
2844 | req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE; | ||
2845 | if (need_invalidate) | ||
2846 | req->Channel = SMB2_CHANNEL_RDMA_V1; | ||
2847 | req->WriteChannelInfoOffset = | ||
2848 | cpu_to_le16(offsetof(struct smb2_write_req, Buffer)); | ||
2849 | req->WriteChannelInfoLength = | ||
2850 | cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1)); | ||
2851 | v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0]; | ||
2852 | v1->offset = cpu_to_le64(wdata->mr->mr->iova); | ||
2853 | v1->token = cpu_to_le32(wdata->mr->mr->rkey); | ||
2854 | v1->length = cpu_to_le32(wdata->mr->mr->length); | ||
2855 | } | ||
2856 | #endif | ||
2810 | /* 4 for rfc1002 length field and 1 for Buffer */ | 2857 | /* 4 for rfc1002 length field and 1 for Buffer */ |
2811 | iov[0].iov_len = 4; | 2858 | iov[0].iov_len = 4; |
2812 | iov[0].iov_base = req; | 2859 | rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes); |
2813 | iov[1].iov_len = get_rfc1002_length(req) - 1; | 2860 | iov[0].iov_base = &rfc1002_marker; |
2814 | iov[1].iov_base = (char *)req + 4; | 2861 | iov[1].iov_len = total_len - 1; |
2862 | iov[1].iov_base = (char *)req; | ||
2815 | 2863 | ||
2816 | rqst.rq_iov = iov; | 2864 | rqst.rq_iov = iov; |
2817 | rqst.rq_nvec = 2; | 2865 | rqst.rq_nvec = 2; |
@@ -2819,13 +2867,22 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
2819 | rqst.rq_npages = wdata->nr_pages; | 2867 | rqst.rq_npages = wdata->nr_pages; |
2820 | rqst.rq_pagesz = wdata->pagesz; | 2868 | rqst.rq_pagesz = wdata->pagesz; |
2821 | rqst.rq_tailsz = wdata->tailsz; | 2869 | rqst.rq_tailsz = wdata->tailsz; |
2822 | 2870 | #ifdef CONFIG_CIFS_SMB_DIRECT | |
2871 | if (wdata->mr) { | ||
2872 | iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1); | ||
2873 | rqst.rq_npages = 0; | ||
2874 | } | ||
2875 | #endif | ||
2823 | cifs_dbg(FYI, "async write at %llu %u bytes\n", | 2876 | cifs_dbg(FYI, "async write at %llu %u bytes\n", |
2824 | wdata->offset, wdata->bytes); | 2877 | wdata->offset, wdata->bytes); |
2825 | 2878 | ||
2879 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
2880 | /* For RDMA read, I/O size is in RemainingBytes not in Length */ | ||
2881 | if (!wdata->mr) | ||
2882 | req->Length = cpu_to_le32(wdata->bytes); | ||
2883 | #else | ||
2826 | req->Length = cpu_to_le32(wdata->bytes); | 2884 | req->Length = cpu_to_le32(wdata->bytes); |
2827 | 2885 | #endif | |
2828 | inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); | ||
2829 | 2886 | ||
2830 | if (wdata->credits) { | 2887 | if (wdata->credits) { |
2831 | shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, | 2888 | shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, |
@@ -2869,13 +2926,15 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
2869 | int resp_buftype; | 2926 | int resp_buftype; |
2870 | struct kvec rsp_iov; | 2927 | struct kvec rsp_iov; |
2871 | int flags = 0; | 2928 | int flags = 0; |
2929 | unsigned int total_len; | ||
2872 | 2930 | ||
2873 | *nbytes = 0; | 2931 | *nbytes = 0; |
2874 | 2932 | ||
2875 | if (n_vec < 1) | 2933 | if (n_vec < 1) |
2876 | return rc; | 2934 | return rc; |
2877 | 2935 | ||
2878 | rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req); | 2936 | rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req, |
2937 | &total_len); | ||
2879 | if (rc) | 2938 | if (rc) |
2880 | return rc; | 2939 | return rc; |
2881 | 2940 | ||
@@ -2885,7 +2944,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
2885 | if (encryption_required(io_parms->tcon)) | 2944 | if (encryption_required(io_parms->tcon)) |
2886 | flags |= CIFS_TRANSFORM_REQ; | 2945 | flags |= CIFS_TRANSFORM_REQ; |
2887 | 2946 | ||
2888 | req->hdr.sync_hdr.ProcessId = cpu_to_le32(io_parms->pid); | 2947 | req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid); |
2889 | 2948 | ||
2890 | req->PersistentFileId = io_parms->persistent_fid; | 2949 | req->PersistentFileId = io_parms->persistent_fid; |
2891 | req->VolatileFileId = io_parms->volatile_fid; | 2950 | req->VolatileFileId = io_parms->volatile_fid; |
@@ -2894,20 +2953,16 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
2894 | req->Channel = 0; | 2953 | req->Channel = 0; |
2895 | req->Length = cpu_to_le32(io_parms->length); | 2954 | req->Length = cpu_to_le32(io_parms->length); |
2896 | req->Offset = cpu_to_le64(io_parms->offset); | 2955 | req->Offset = cpu_to_le64(io_parms->offset); |
2897 | /* 4 for rfc1002 length field */ | ||
2898 | req->DataOffset = cpu_to_le16( | 2956 | req->DataOffset = cpu_to_le16( |
2899 | offsetof(struct smb2_write_req, Buffer) - 4); | 2957 | offsetof(struct smb2_write_req, Buffer)); |
2900 | req->RemainingBytes = 0; | 2958 | req->RemainingBytes = 0; |
2901 | 2959 | ||
2902 | iov[0].iov_base = (char *)req; | 2960 | iov[0].iov_base = (char *)req; |
2903 | /* 4 for rfc1002 length field and 1 for Buffer */ | 2961 | /* 1 for Buffer */ |
2904 | iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; | 2962 | iov[0].iov_len = total_len - 1; |
2905 | 2963 | ||
2906 | /* length of entire message including data to be written */ | 2964 | rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1, |
2907 | inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); | 2965 | &resp_buftype, flags, &rsp_iov); |
2908 | |||
2909 | rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1, | ||
2910 | &resp_buftype, flags, &rsp_iov); | ||
2911 | cifs_small_buf_release(req); | 2966 | cifs_small_buf_release(req); |
2912 | rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; | 2967 | rsp = (struct smb2_write_rsp *)rsp_iov.iov_base; |
2913 | 2968 | ||
@@ -2984,13 +3039,15 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
2984 | unsigned int output_size = CIFSMaxBufSize; | 3039 | unsigned int output_size = CIFSMaxBufSize; |
2985 | size_t info_buf_size; | 3040 | size_t info_buf_size; |
2986 | int flags = 0; | 3041 | int flags = 0; |
3042 | unsigned int total_len; | ||
2987 | 3043 | ||
2988 | if (ses && (ses->server)) | 3044 | if (ses && (ses->server)) |
2989 | server = ses->server; | 3045 | server = ses->server; |
2990 | else | 3046 | else |
2991 | return -EIO; | 3047 | return -EIO; |
2992 | 3048 | ||
2993 | rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req); | 3049 | rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req, |
3050 | &total_len); | ||
2994 | if (rc) | 3051 | if (rc) |
2995 | return rc; | 3052 | return rc; |
2996 | 3053 | ||
@@ -3022,7 +3079,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
3022 | memcpy(bufptr, &asteriks, len); | 3079 | memcpy(bufptr, &asteriks, len); |
3023 | 3080 | ||
3024 | req->FileNameOffset = | 3081 | req->FileNameOffset = |
3025 | cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4); | 3082 | cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1); |
3026 | req->FileNameLength = cpu_to_le16(len); | 3083 | req->FileNameLength = cpu_to_le16(len); |
3027 | /* | 3084 | /* |
3028 | * BB could be 30 bytes or so longer if we used SMB2 specific | 3085 | * BB could be 30 bytes or so longer if we used SMB2 specific |
@@ -3033,15 +3090,13 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
3033 | req->OutputBufferLength = cpu_to_le32(output_size); | 3090 | req->OutputBufferLength = cpu_to_le32(output_size); |
3034 | 3091 | ||
3035 | iov[0].iov_base = (char *)req; | 3092 | iov[0].iov_base = (char *)req; |
3036 | /* 4 for RFC1001 length and 1 for Buffer */ | 3093 | /* 1 for Buffer */ |
3037 | iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; | 3094 | iov[0].iov_len = total_len - 1; |
3038 | 3095 | ||
3039 | iov[1].iov_base = (char *)(req->Buffer); | 3096 | iov[1].iov_base = (char *)(req->Buffer); |
3040 | iov[1].iov_len = len; | 3097 | iov[1].iov_len = len; |
3041 | 3098 | ||
3042 | inc_rfc1001_len(req, len - 1 /* Buffer */); | 3099 | rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); |
3043 | |||
3044 | rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov); | ||
3045 | cifs_small_buf_release(req); | 3100 | cifs_small_buf_release(req); |
3046 | rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; | 3101 | rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base; |
3047 | 3102 | ||
@@ -3110,6 +3165,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
3110 | unsigned int i; | 3165 | unsigned int i; |
3111 | struct cifs_ses *ses = tcon->ses; | 3166 | struct cifs_ses *ses = tcon->ses; |
3112 | int flags = 0; | 3167 | int flags = 0; |
3168 | unsigned int total_len; | ||
3113 | 3169 | ||
3114 | if (!ses || !(ses->server)) | 3170 | if (!ses || !(ses->server)) |
3115 | return -EIO; | 3171 | return -EIO; |
@@ -3121,7 +3177,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
3121 | if (!iov) | 3177 | if (!iov) |
3122 | return -ENOMEM; | 3178 | return -ENOMEM; |
3123 | 3179 | ||
3124 | rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req); | 3180 | rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len); |
3125 | if (rc) { | 3181 | if (rc) { |
3126 | kfree(iov); | 3182 | kfree(iov); |
3127 | return rc; | 3183 | return rc; |
@@ -3130,7 +3186,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
3130 | if (encryption_required(tcon)) | 3186 | if (encryption_required(tcon)) |
3131 | flags |= CIFS_TRANSFORM_REQ; | 3187 | flags |= CIFS_TRANSFORM_REQ; |
3132 | 3188 | ||
3133 | req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); | 3189 | req->sync_hdr.ProcessId = cpu_to_le32(pid); |
3134 | 3190 | ||
3135 | req->InfoType = info_type; | 3191 | req->InfoType = info_type; |
3136 | req->FileInfoClass = info_class; | 3192 | req->FileInfoClass = info_class; |
@@ -3138,27 +3194,25 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
3138 | req->VolatileFileId = volatile_fid; | 3194 | req->VolatileFileId = volatile_fid; |
3139 | req->AdditionalInformation = cpu_to_le32(additional_info); | 3195 | req->AdditionalInformation = cpu_to_le32(additional_info); |
3140 | 3196 | ||
3141 | /* 4 for RFC1001 length and 1 for Buffer */ | ||
3142 | req->BufferOffset = | 3197 | req->BufferOffset = |
3143 | cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4); | 3198 | cpu_to_le16(sizeof(struct smb2_set_info_req) - 1); |
3144 | req->BufferLength = cpu_to_le32(*size); | 3199 | req->BufferLength = cpu_to_le32(*size); |
3145 | 3200 | ||
3146 | inc_rfc1001_len(req, *size - 1 /* Buffer */); | ||
3147 | |||
3148 | memcpy(req->Buffer, *data, *size); | 3201 | memcpy(req->Buffer, *data, *size); |
3202 | total_len += *size; | ||
3149 | 3203 | ||
3150 | iov[0].iov_base = (char *)req; | 3204 | iov[0].iov_base = (char *)req; |
3151 | /* 4 for RFC1001 length */ | 3205 | /* 1 for Buffer */ |
3152 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 3206 | iov[0].iov_len = total_len - 1; |
3153 | 3207 | ||
3154 | for (i = 1; i < num; i++) { | 3208 | for (i = 1; i < num; i++) { |
3155 | inc_rfc1001_len(req, size[i]); | ||
3156 | le32_add_cpu(&req->BufferLength, size[i]); | 3209 | le32_add_cpu(&req->BufferLength, size[i]); |
3157 | iov[i].iov_base = (char *)data[i]; | 3210 | iov[i].iov_base = (char *)data[i]; |
3158 | iov[i].iov_len = size[i]; | 3211 | iov[i].iov_len = size[i]; |
3159 | } | 3212 | } |
3160 | 3213 | ||
3161 | rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); | 3214 | rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags, |
3215 | &rsp_iov); | ||
3162 | cifs_small_buf_release(req); | 3216 | cifs_small_buf_release(req); |
3163 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; | 3217 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; |
3164 | 3218 | ||
@@ -3310,11 +3364,17 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, | |||
3310 | __u8 oplock_level) | 3364 | __u8 oplock_level) |
3311 | { | 3365 | { |
3312 | int rc; | 3366 | int rc; |
3313 | struct smb2_oplock_break *req = NULL; | 3367 | struct smb2_oplock_break_req *req = NULL; |
3368 | struct cifs_ses *ses = tcon->ses; | ||
3314 | int flags = CIFS_OBREAK_OP; | 3369 | int flags = CIFS_OBREAK_OP; |
3370 | unsigned int total_len; | ||
3371 | struct kvec iov[1]; | ||
3372 | struct kvec rsp_iov; | ||
3373 | int resp_buf_type; | ||
3315 | 3374 | ||
3316 | cifs_dbg(FYI, "SMB2_oplock_break\n"); | 3375 | cifs_dbg(FYI, "SMB2_oplock_break\n"); |
3317 | rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); | 3376 | rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req, |
3377 | &total_len); | ||
3318 | if (rc) | 3378 | if (rc) |
3319 | return rc; | 3379 | return rc; |
3320 | 3380 | ||
@@ -3324,9 +3384,14 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, | |||
3324 | req->VolatileFid = volatile_fid; | 3384 | req->VolatileFid = volatile_fid; |
3325 | req->PersistentFid = persistent_fid; | 3385 | req->PersistentFid = persistent_fid; |
3326 | req->OplockLevel = oplock_level; | 3386 | req->OplockLevel = oplock_level; |
3327 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); | 3387 | req->sync_hdr.CreditRequest = cpu_to_le16(1); |
3328 | 3388 | ||
3329 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); | 3389 | flags |= CIFS_NO_RESP; |
3390 | |||
3391 | iov[0].iov_base = (char *)req; | ||
3392 | iov[0].iov_len = total_len; | ||
3393 | |||
3394 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); | ||
3330 | cifs_small_buf_release(req); | 3395 | cifs_small_buf_release(req); |
3331 | 3396 | ||
3332 | if (rc) { | 3397 | if (rc) { |
@@ -3355,13 +3420,15 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level, | |||
3355 | { | 3420 | { |
3356 | int rc; | 3421 | int rc; |
3357 | struct smb2_query_info_req *req; | 3422 | struct smb2_query_info_req *req; |
3423 | unsigned int total_len; | ||
3358 | 3424 | ||
3359 | cifs_dbg(FYI, "Query FSInfo level %d\n", level); | 3425 | cifs_dbg(FYI, "Query FSInfo level %d\n", level); |
3360 | 3426 | ||
3361 | if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) | 3427 | if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) |
3362 | return -EIO; | 3428 | return -EIO; |
3363 | 3429 | ||
3364 | rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); | 3430 | rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req, |
3431 | &total_len); | ||
3365 | if (rc) | 3432 | if (rc) |
3366 | return rc; | 3433 | return rc; |
3367 | 3434 | ||
@@ -3369,15 +3436,14 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level, | |||
3369 | req->FileInfoClass = level; | 3436 | req->FileInfoClass = level; |
3370 | req->PersistentFileId = persistent_fid; | 3437 | req->PersistentFileId = persistent_fid; |
3371 | req->VolatileFileId = volatile_fid; | 3438 | req->VolatileFileId = volatile_fid; |
3372 | /* 4 for rfc1002 length field and 1 for pad */ | 3439 | /* 1 for pad */ |
3373 | req->InputBufferOffset = | 3440 | req->InputBufferOffset = |
3374 | cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); | 3441 | cpu_to_le16(sizeof(struct smb2_query_info_req) - 1); |
3375 | req->OutputBufferLength = cpu_to_le32( | 3442 | req->OutputBufferLength = cpu_to_le32( |
3376 | outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4); | 3443 | outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4); |
3377 | 3444 | ||
3378 | iov->iov_base = (char *)req; | 3445 | iov->iov_base = (char *)req; |
3379 | /* 4 for rfc1002 length field */ | 3446 | iov->iov_len = total_len; |
3380 | iov->iov_len = get_rfc1002_length(req) + 4; | ||
3381 | return 0; | 3447 | return 0; |
3382 | } | 3448 | } |
3383 | 3449 | ||
@@ -3403,7 +3469,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
3403 | if (encryption_required(tcon)) | 3469 | if (encryption_required(tcon)) |
3404 | flags |= CIFS_TRANSFORM_REQ; | 3470 | flags |= CIFS_TRANSFORM_REQ; |
3405 | 3471 | ||
3406 | rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); | 3472 | rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); |
3407 | cifs_small_buf_release(iov.iov_base); | 3473 | cifs_small_buf_release(iov.iov_base); |
3408 | if (rc) { | 3474 | if (rc) { |
3409 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); | 3475 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); |
@@ -3459,7 +3525,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon, | |||
3459 | if (encryption_required(tcon)) | 3525 | if (encryption_required(tcon)) |
3460 | flags |= CIFS_TRANSFORM_REQ; | 3526 | flags |= CIFS_TRANSFORM_REQ; |
3461 | 3527 | ||
3462 | rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); | 3528 | rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov); |
3463 | cifs_small_buf_release(iov.iov_base); | 3529 | cifs_small_buf_release(iov.iov_base); |
3464 | if (rc) { | 3530 | if (rc) { |
3465 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); | 3531 | cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE); |
@@ -3505,34 +3571,33 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon, | |||
3505 | int resp_buf_type; | 3571 | int resp_buf_type; |
3506 | unsigned int count; | 3572 | unsigned int count; |
3507 | int flags = CIFS_NO_RESP; | 3573 | int flags = CIFS_NO_RESP; |
3574 | unsigned int total_len; | ||
3508 | 3575 | ||
3509 | cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); | 3576 | cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock); |
3510 | 3577 | ||
3511 | rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req); | 3578 | rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len); |
3512 | if (rc) | 3579 | if (rc) |
3513 | return rc; | 3580 | return rc; |
3514 | 3581 | ||
3515 | if (encryption_required(tcon)) | 3582 | if (encryption_required(tcon)) |
3516 | flags |= CIFS_TRANSFORM_REQ; | 3583 | flags |= CIFS_TRANSFORM_REQ; |
3517 | 3584 | ||
3518 | req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid); | 3585 | req->sync_hdr.ProcessId = cpu_to_le32(pid); |
3519 | req->LockCount = cpu_to_le16(num_lock); | 3586 | req->LockCount = cpu_to_le16(num_lock); |
3520 | 3587 | ||
3521 | req->PersistentFileId = persist_fid; | 3588 | req->PersistentFileId = persist_fid; |
3522 | req->VolatileFileId = volatile_fid; | 3589 | req->VolatileFileId = volatile_fid; |
3523 | 3590 | ||
3524 | count = num_lock * sizeof(struct smb2_lock_element); | 3591 | count = num_lock * sizeof(struct smb2_lock_element); |
3525 | inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element)); | ||
3526 | 3592 | ||
3527 | iov[0].iov_base = (char *)req; | 3593 | iov[0].iov_base = (char *)req; |
3528 | /* 4 for rfc1002 length field and count for all locks */ | 3594 | iov[0].iov_len = total_len - sizeof(struct smb2_lock_element); |
3529 | iov[0].iov_len = get_rfc1002_length(req) + 4 - count; | ||
3530 | iov[1].iov_base = (char *)buf; | 3595 | iov[1].iov_base = (char *)buf; |
3531 | iov[1].iov_len = count; | 3596 | iov[1].iov_len = count; |
3532 | 3597 | ||
3533 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); | 3598 | cifs_stats_inc(&tcon->stats.cifs_stats.num_locks); |
3534 | rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, flags, | 3599 | rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags, |
3535 | &rsp_iov); | 3600 | &rsp_iov); |
3536 | cifs_small_buf_release(req); | 3601 | cifs_small_buf_release(req); |
3537 | if (rc) { | 3602 | if (rc) { |
3538 | cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); | 3603 | cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc); |
@@ -3565,24 +3630,35 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, | |||
3565 | { | 3630 | { |
3566 | int rc; | 3631 | int rc; |
3567 | struct smb2_lease_ack *req = NULL; | 3632 | struct smb2_lease_ack *req = NULL; |
3633 | struct cifs_ses *ses = tcon->ses; | ||
3568 | int flags = CIFS_OBREAK_OP; | 3634 | int flags = CIFS_OBREAK_OP; |
3635 | unsigned int total_len; | ||
3636 | struct kvec iov[1]; | ||
3637 | struct kvec rsp_iov; | ||
3638 | int resp_buf_type; | ||
3569 | 3639 | ||
3570 | cifs_dbg(FYI, "SMB2_lease_break\n"); | 3640 | cifs_dbg(FYI, "SMB2_lease_break\n"); |
3571 | rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); | 3641 | rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req, |
3642 | &total_len); | ||
3572 | if (rc) | 3643 | if (rc) |
3573 | return rc; | 3644 | return rc; |
3574 | 3645 | ||
3575 | if (encryption_required(tcon)) | 3646 | if (encryption_required(tcon)) |
3576 | flags |= CIFS_TRANSFORM_REQ; | 3647 | flags |= CIFS_TRANSFORM_REQ; |
3577 | 3648 | ||
3578 | req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1); | 3649 | req->sync_hdr.CreditRequest = cpu_to_le16(1); |
3579 | req->StructureSize = cpu_to_le16(36); | 3650 | req->StructureSize = cpu_to_le16(36); |
3580 | inc_rfc1001_len(req, 12); | 3651 | total_len += 12; |
3581 | 3652 | ||
3582 | memcpy(req->LeaseKey, lease_key, 16); | 3653 | memcpy(req->LeaseKey, lease_key, 16); |
3583 | req->LeaseState = lease_state; | 3654 | req->LeaseState = lease_state; |
3584 | 3655 | ||
3585 | rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags); | 3656 | flags |= CIFS_NO_RESP; |
3657 | |||
3658 | iov[0].iov_base = (char *)req; | ||
3659 | iov[0].iov_len = total_len; | ||
3660 | |||
3661 | rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); | ||
3586 | cifs_small_buf_release(req); | 3662 | cifs_small_buf_release(req); |
3587 | 3663 | ||
3588 | if (rc) { | 3664 | if (rc) { |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index c2ec934be968..6eb9f9691ed4 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
@@ -195,7 +195,7 @@ struct smb2_symlink_err_rsp { | |||
195 | #define SMB2_CLIENT_GUID_SIZE 16 | 195 | #define SMB2_CLIENT_GUID_SIZE 16 |
196 | 196 | ||
197 | struct smb2_negotiate_req { | 197 | struct smb2_negotiate_req { |
198 | struct smb2_hdr hdr; | 198 | struct smb2_sync_hdr sync_hdr; |
199 | __le16 StructureSize; /* Must be 36 */ | 199 | __le16 StructureSize; /* Must be 36 */ |
200 | __le16 DialectCount; | 200 | __le16 DialectCount; |
201 | __le16 SecurityMode; | 201 | __le16 SecurityMode; |
@@ -282,7 +282,7 @@ struct smb2_negotiate_rsp { | |||
282 | #define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04 | 282 | #define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04 |
283 | 283 | ||
284 | struct smb2_sess_setup_req { | 284 | struct smb2_sess_setup_req { |
285 | struct smb2_hdr hdr; | 285 | struct smb2_sync_hdr sync_hdr; |
286 | __le16 StructureSize; /* Must be 25 */ | 286 | __le16 StructureSize; /* Must be 25 */ |
287 | __u8 Flags; | 287 | __u8 Flags; |
288 | __u8 SecurityMode; | 288 | __u8 SecurityMode; |
@@ -308,7 +308,7 @@ struct smb2_sess_setup_rsp { | |||
308 | } __packed; | 308 | } __packed; |
309 | 309 | ||
310 | struct smb2_logoff_req { | 310 | struct smb2_logoff_req { |
311 | struct smb2_hdr hdr; | 311 | struct smb2_sync_hdr sync_hdr; |
312 | __le16 StructureSize; /* Must be 4 */ | 312 | __le16 StructureSize; /* Must be 4 */ |
313 | __le16 Reserved; | 313 | __le16 Reserved; |
314 | } __packed; | 314 | } __packed; |
@@ -323,7 +323,7 @@ struct smb2_logoff_rsp { | |||
323 | #define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001 | 323 | #define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001 |
324 | 324 | ||
325 | struct smb2_tree_connect_req { | 325 | struct smb2_tree_connect_req { |
326 | struct smb2_hdr hdr; | 326 | struct smb2_sync_hdr sync_hdr; |
327 | __le16 StructureSize; /* Must be 9 */ | 327 | __le16 StructureSize; /* Must be 9 */ |
328 | __le16 Reserved; /* Flags in SMB3.1.1 */ | 328 | __le16 Reserved; /* Flags in SMB3.1.1 */ |
329 | __le16 PathOffset; | 329 | __le16 PathOffset; |
@@ -375,7 +375,7 @@ struct smb2_tree_connect_rsp { | |||
375 | #define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */ | 375 | #define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */ |
376 | 376 | ||
377 | struct smb2_tree_disconnect_req { | 377 | struct smb2_tree_disconnect_req { |
378 | struct smb2_hdr hdr; | 378 | struct smb2_sync_hdr sync_hdr; |
379 | __le16 StructureSize; /* Must be 4 */ | 379 | __le16 StructureSize; /* Must be 4 */ |
380 | __le16 Reserved; | 380 | __le16 Reserved; |
381 | } __packed; | 381 | } __packed; |
@@ -496,7 +496,7 @@ struct smb2_tree_disconnect_rsp { | |||
496 | #define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9 | 496 | #define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9 |
497 | 497 | ||
498 | struct smb2_create_req { | 498 | struct smb2_create_req { |
499 | struct smb2_hdr hdr; | 499 | struct smb2_sync_hdr sync_hdr; |
500 | __le16 StructureSize; /* Must be 57 */ | 500 | __le16 StructureSize; /* Must be 57 */ |
501 | __u8 SecurityFlags; | 501 | __u8 SecurityFlags; |
502 | __u8 RequestedOplockLevel; | 502 | __u8 RequestedOplockLevel; |
@@ -753,7 +753,7 @@ struct duplicate_extents_to_file { | |||
753 | } __packed; | 753 | } __packed; |
754 | 754 | ||
755 | struct smb2_ioctl_req { | 755 | struct smb2_ioctl_req { |
756 | struct smb2_hdr hdr; | 756 | struct smb2_sync_hdr sync_hdr; |
757 | __le16 StructureSize; /* Must be 57 */ | 757 | __le16 StructureSize; /* Must be 57 */ |
758 | __u16 Reserved; | 758 | __u16 Reserved; |
759 | __le32 CtlCode; | 759 | __le32 CtlCode; |
@@ -789,7 +789,7 @@ struct smb2_ioctl_rsp { | |||
789 | /* Currently defined values for close flags */ | 789 | /* Currently defined values for close flags */ |
790 | #define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001) | 790 | #define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001) |
791 | struct smb2_close_req { | 791 | struct smb2_close_req { |
792 | struct smb2_hdr hdr; | 792 | struct smb2_sync_hdr sync_hdr; |
793 | __le16 StructureSize; /* Must be 24 */ | 793 | __le16 StructureSize; /* Must be 24 */ |
794 | __le16 Flags; | 794 | __le16 Flags; |
795 | __le32 Reserved; | 795 | __le32 Reserved; |
@@ -812,7 +812,7 @@ struct smb2_close_rsp { | |||
812 | } __packed; | 812 | } __packed; |
813 | 813 | ||
814 | struct smb2_flush_req { | 814 | struct smb2_flush_req { |
815 | struct smb2_hdr hdr; | 815 | struct smb2_sync_hdr sync_hdr; |
816 | __le16 StructureSize; /* Must be 24 */ | 816 | __le16 StructureSize; /* Must be 24 */ |
817 | __le16 Reserved1; | 817 | __le16 Reserved1; |
818 | __le32 Reserved2; | 818 | __le32 Reserved2; |
@@ -830,9 +830,9 @@ struct smb2_flush_rsp { | |||
830 | #define SMB2_READFLAG_READ_UNBUFFERED 0x01 | 830 | #define SMB2_READFLAG_READ_UNBUFFERED 0x01 |
831 | 831 | ||
832 | /* Channel field for read and write: exactly one of following flags can be set*/ | 832 | /* Channel field for read and write: exactly one of following flags can be set*/ |
833 | #define SMB2_CHANNEL_NONE 0x00000000 | 833 | #define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000) |
834 | #define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ | 834 | #define SMB2_CHANNEL_RDMA_V1 cpu_to_le32(0x00000001) /* SMB3 or later */ |
835 | #define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */ | 835 | #define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002) /* >= SMB3.02 */ |
836 | 836 | ||
837 | /* SMB2 read request without RFC1001 length at the beginning */ | 837 | /* SMB2 read request without RFC1001 length at the beginning */ |
838 | struct smb2_read_plain_req { | 838 | struct smb2_read_plain_req { |
@@ -847,8 +847,8 @@ struct smb2_read_plain_req { | |||
847 | __le32 MinimumCount; | 847 | __le32 MinimumCount; |
848 | __le32 Channel; /* MBZ except for SMB3 or later */ | 848 | __le32 Channel; /* MBZ except for SMB3 or later */ |
849 | __le32 RemainingBytes; | 849 | __le32 RemainingBytes; |
850 | __le16 ReadChannelInfoOffset; /* Reserved MBZ */ | 850 | __le16 ReadChannelInfoOffset; |
851 | __le16 ReadChannelInfoLength; /* Reserved MBZ */ | 851 | __le16 ReadChannelInfoLength; |
852 | __u8 Buffer[1]; | 852 | __u8 Buffer[1]; |
853 | } __packed; | 853 | } __packed; |
854 | 854 | ||
@@ -868,7 +868,7 @@ struct smb2_read_rsp { | |||
868 | #define SMB2_WRITEFLAG_WRITE_UNBUFFERED 0x00000002 /* SMB3.02 or later */ | 868 | #define SMB2_WRITEFLAG_WRITE_UNBUFFERED 0x00000002 /* SMB3.02 or later */ |
869 | 869 | ||
870 | struct smb2_write_req { | 870 | struct smb2_write_req { |
871 | struct smb2_hdr hdr; | 871 | struct smb2_sync_hdr sync_hdr; |
872 | __le16 StructureSize; /* Must be 49 */ | 872 | __le16 StructureSize; /* Must be 49 */ |
873 | __le16 DataOffset; /* offset from start of SMB2 header to write data */ | 873 | __le16 DataOffset; /* offset from start of SMB2 header to write data */ |
874 | __le32 Length; | 874 | __le32 Length; |
@@ -877,8 +877,8 @@ struct smb2_write_req { | |||
877 | __u64 VolatileFileId; /* opaque endianness */ | 877 | __u64 VolatileFileId; /* opaque endianness */ |
878 | __le32 Channel; /* Reserved MBZ */ | 878 | __le32 Channel; /* Reserved MBZ */ |
879 | __le32 RemainingBytes; | 879 | __le32 RemainingBytes; |
880 | __le16 WriteChannelInfoOffset; /* Reserved MBZ */ | 880 | __le16 WriteChannelInfoOffset; |
881 | __le16 WriteChannelInfoLength; /* Reserved MBZ */ | 881 | __le16 WriteChannelInfoLength; |
882 | __le32 Flags; | 882 | __le32 Flags; |
883 | __u8 Buffer[1]; | 883 | __u8 Buffer[1]; |
884 | } __packed; | 884 | } __packed; |
@@ -907,7 +907,7 @@ struct smb2_lock_element { | |||
907 | } __packed; | 907 | } __packed; |
908 | 908 | ||
909 | struct smb2_lock_req { | 909 | struct smb2_lock_req { |
910 | struct smb2_hdr hdr; | 910 | struct smb2_sync_hdr sync_hdr; |
911 | __le16 StructureSize; /* Must be 48 */ | 911 | __le16 StructureSize; /* Must be 48 */ |
912 | __le16 LockCount; | 912 | __le16 LockCount; |
913 | __le32 Reserved; | 913 | __le32 Reserved; |
@@ -924,7 +924,7 @@ struct smb2_lock_rsp { | |||
924 | } __packed; | 924 | } __packed; |
925 | 925 | ||
926 | struct smb2_echo_req { | 926 | struct smb2_echo_req { |
927 | struct smb2_hdr hdr; | 927 | struct smb2_sync_hdr sync_hdr; |
928 | __le16 StructureSize; /* Must be 4 */ | 928 | __le16 StructureSize; /* Must be 4 */ |
929 | __u16 Reserved; | 929 | __u16 Reserved; |
930 | } __packed; | 930 | } __packed; |
@@ -942,7 +942,7 @@ struct smb2_echo_rsp { | |||
942 | #define SMB2_REOPEN 0x10 | 942 | #define SMB2_REOPEN 0x10 |
943 | 943 | ||
944 | struct smb2_query_directory_req { | 944 | struct smb2_query_directory_req { |
945 | struct smb2_hdr hdr; | 945 | struct smb2_sync_hdr sync_hdr; |
946 | __le16 StructureSize; /* Must be 33 */ | 946 | __le16 StructureSize; /* Must be 33 */ |
947 | __u8 FileInformationClass; | 947 | __u8 FileInformationClass; |
948 | __u8 Flags; | 948 | __u8 Flags; |
@@ -989,7 +989,7 @@ struct smb2_query_directory_rsp { | |||
989 | #define SL_INDEX_SPECIFIED 0x00000004 | 989 | #define SL_INDEX_SPECIFIED 0x00000004 |
990 | 990 | ||
991 | struct smb2_query_info_req { | 991 | struct smb2_query_info_req { |
992 | struct smb2_hdr hdr; | 992 | struct smb2_sync_hdr sync_hdr; |
993 | __le16 StructureSize; /* Must be 41 */ | 993 | __le16 StructureSize; /* Must be 41 */ |
994 | __u8 InfoType; | 994 | __u8 InfoType; |
995 | __u8 FileInfoClass; | 995 | __u8 FileInfoClass; |
@@ -1013,7 +1013,7 @@ struct smb2_query_info_rsp { | |||
1013 | } __packed; | 1013 | } __packed; |
1014 | 1014 | ||
1015 | struct smb2_set_info_req { | 1015 | struct smb2_set_info_req { |
1016 | struct smb2_hdr hdr; | 1016 | struct smb2_sync_hdr sync_hdr; |
1017 | __le16 StructureSize; /* Must be 33 */ | 1017 | __le16 StructureSize; /* Must be 33 */ |
1018 | __u8 InfoType; | 1018 | __u8 InfoType; |
1019 | __u8 FileInfoClass; | 1019 | __u8 FileInfoClass; |
@@ -1031,7 +1031,19 @@ struct smb2_set_info_rsp { | |||
1031 | __le16 StructureSize; /* Must be 2 */ | 1031 | __le16 StructureSize; /* Must be 2 */ |
1032 | } __packed; | 1032 | } __packed; |
1033 | 1033 | ||
1034 | struct smb2_oplock_break { | 1034 | /* oplock break without an rfc1002 header */ |
1035 | struct smb2_oplock_break_req { | ||
1036 | struct smb2_sync_hdr sync_hdr; | ||
1037 | __le16 StructureSize; /* Must be 24 */ | ||
1038 | __u8 OplockLevel; | ||
1039 | __u8 Reserved; | ||
1040 | __le32 Reserved2; | ||
1041 | __u64 PersistentFid; | ||
1042 | __u64 VolatileFid; | ||
1043 | } __packed; | ||
1044 | |||
1045 | /* oplock break with an rfc1002 header */ | ||
1046 | struct smb2_oplock_break_rsp { | ||
1035 | struct smb2_hdr hdr; | 1047 | struct smb2_hdr hdr; |
1036 | __le16 StructureSize; /* Must be 24 */ | 1048 | __le16 StructureSize; /* Must be 24 */ |
1037 | __u8 OplockLevel; | 1049 | __u8 OplockLevel; |
@@ -1057,7 +1069,7 @@ struct smb2_lease_break { | |||
1057 | } __packed; | 1069 | } __packed; |
1058 | 1070 | ||
1059 | struct smb2_lease_ack { | 1071 | struct smb2_lease_ack { |
1060 | struct smb2_hdr hdr; | 1072 | struct smb2_sync_hdr sync_hdr; |
1061 | __le16 StructureSize; /* Must be 36 */ | 1073 | __le16 StructureSize; /* Must be 36 */ |
1062 | __le16 Reserved; | 1074 | __le16 Reserved; |
1063 | __le32 Flags; | 1075 | __le32 Flags; |
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index e9ab5227e7a8..05287b01f596 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
@@ -125,8 +125,7 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, | |||
125 | struct smb2_err_rsp **err_buf); | 125 | struct smb2_err_rsp **err_buf); |
126 | extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, | 126 | extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, |
127 | u64 persistent_fid, u64 volatile_fid, u32 opcode, | 127 | u64 persistent_fid, u64 volatile_fid, u32 opcode, |
128 | bool is_fsctl, bool use_ipc, | 128 | bool is_fsctl, char *in_data, u32 indatalen, |
129 | char *in_data, u32 indatalen, | ||
130 | char **out_data, u32 *plen /* returned data len */); | 129 | char **out_data, u32 *plen /* returned data len */); |
131 | extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | 130 | extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, |
132 | u64 persistent_file_id, u64 volatile_file_id); | 131 | u64 persistent_file_id, u64 volatile_file_id); |
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c new file mode 100644 index 000000000000..5130492847eb --- /dev/null +++ b/fs/cifs/smbdirect.c | |||
@@ -0,0 +1,2610 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017, Microsoft Corporation. | ||
3 | * | ||
4 | * Author(s): Long Li <longli@microsoft.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
14 | * the GNU General Public License for more details. | ||
15 | */ | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/highmem.h> | ||
18 | #include "smbdirect.h" | ||
19 | #include "cifs_debug.h" | ||
20 | |||
21 | static struct smbd_response *get_empty_queue_buffer( | ||
22 | struct smbd_connection *info); | ||
23 | static struct smbd_response *get_receive_buffer( | ||
24 | struct smbd_connection *info); | ||
25 | static void put_receive_buffer( | ||
26 | struct smbd_connection *info, | ||
27 | struct smbd_response *response); | ||
28 | static int allocate_receive_buffers(struct smbd_connection *info, int num_buf); | ||
29 | static void destroy_receive_buffers(struct smbd_connection *info); | ||
30 | |||
31 | static void put_empty_packet( | ||
32 | struct smbd_connection *info, struct smbd_response *response); | ||
33 | static void enqueue_reassembly( | ||
34 | struct smbd_connection *info, | ||
35 | struct smbd_response *response, int data_length); | ||
36 | static struct smbd_response *_get_first_reassembly( | ||
37 | struct smbd_connection *info); | ||
38 | |||
39 | static int smbd_post_recv( | ||
40 | struct smbd_connection *info, | ||
41 | struct smbd_response *response); | ||
42 | |||
43 | static int smbd_post_send_empty(struct smbd_connection *info); | ||
44 | static int smbd_post_send_data( | ||
45 | struct smbd_connection *info, | ||
46 | struct kvec *iov, int n_vec, int remaining_data_length); | ||
47 | static int smbd_post_send_page(struct smbd_connection *info, | ||
48 | struct page *page, unsigned long offset, | ||
49 | size_t size, int remaining_data_length); | ||
50 | |||
51 | static void destroy_mr_list(struct smbd_connection *info); | ||
52 | static int allocate_mr_list(struct smbd_connection *info); | ||
53 | |||
54 | /* SMBD version number */ | ||
55 | #define SMBD_V1 0x0100 | ||
56 | |||
57 | /* Port numbers for SMBD transport */ | ||
58 | #define SMB_PORT 445 | ||
59 | #define SMBD_PORT 5445 | ||
60 | |||
61 | /* Address lookup and resolve timeout in ms */ | ||
62 | #define RDMA_RESOLVE_TIMEOUT 5000 | ||
63 | |||
64 | /* SMBD negotiation timeout in seconds */ | ||
65 | #define SMBD_NEGOTIATE_TIMEOUT 120 | ||
66 | |||
67 | /* SMBD minimum receive size and fragmented sized defined in [MS-SMBD] */ | ||
68 | #define SMBD_MIN_RECEIVE_SIZE 128 | ||
69 | #define SMBD_MIN_FRAGMENTED_SIZE 131072 | ||
70 | |||
71 | /* | ||
72 | * Default maximum number of RDMA read/write outstanding on this connection | ||
73 | * This value is possibly decreased during QP creation on hardware limit | ||
74 | */ | ||
75 | #define SMBD_CM_RESPONDER_RESOURCES 32 | ||
76 | |||
77 | /* Maximum number of retries on data transfer operations */ | ||
78 | #define SMBD_CM_RETRY 6 | ||
79 | /* No need to retry on Receiver Not Ready since SMBD manages credits */ | ||
80 | #define SMBD_CM_RNR_RETRY 0 | ||
81 | |||
82 | /* | ||
83 | * User configurable initial values per SMBD transport connection | ||
84 | * as defined in [MS-SMBD] 3.1.1.1 | ||
85 | * Those may change after a SMBD negotiation | ||
86 | */ | ||
87 | /* The local peer's maximum number of credits to grant to the peer */ | ||
88 | int smbd_receive_credit_max = 255; | ||
89 | |||
90 | /* The remote peer's credit request of local peer */ | ||
91 | int smbd_send_credit_target = 255; | ||
92 | |||
93 | /* The maximum single message size can be sent to remote peer */ | ||
94 | int smbd_max_send_size = 1364; | ||
95 | |||
96 | /* The maximum fragmented upper-layer payload receive size supported */ | ||
97 | int smbd_max_fragmented_recv_size = 1024 * 1024; | ||
98 | |||
99 | /* The maximum single-message size which can be received */ | ||
100 | int smbd_max_receive_size = 8192; | ||
101 | |||
102 | /* The timeout to initiate send of a keepalive message on idle */ | ||
103 | int smbd_keep_alive_interval = 120; | ||
104 | |||
105 | /* | ||
106 | * User configurable initial values for RDMA transport | ||
107 | * The actual values used may be lower and are limited to hardware capabilities | ||
108 | */ | ||
109 | /* Default maximum number of SGEs in a RDMA write/read */ | ||
110 | int smbd_max_frmr_depth = 2048; | ||
111 | |||
112 | /* If payload is less than this byte, use RDMA send/recv not read/write */ | ||
113 | int rdma_readwrite_threshold = 4096; | ||
114 | |||
115 | /* Transport logging functions | ||
116 | * Logging are defined as classes. They can be OR'ed to define the actual | ||
117 | * logging level via module parameter smbd_logging_class | ||
118 | * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and | ||
119 | * log_rdma_event() | ||
120 | */ | ||
121 | #define LOG_OUTGOING 0x1 | ||
122 | #define LOG_INCOMING 0x2 | ||
123 | #define LOG_READ 0x4 | ||
124 | #define LOG_WRITE 0x8 | ||
125 | #define LOG_RDMA_SEND 0x10 | ||
126 | #define LOG_RDMA_RECV 0x20 | ||
127 | #define LOG_KEEP_ALIVE 0x40 | ||
128 | #define LOG_RDMA_EVENT 0x80 | ||
129 | #define LOG_RDMA_MR 0x100 | ||
130 | static unsigned int smbd_logging_class; | ||
131 | module_param(smbd_logging_class, uint, 0644); | ||
132 | MODULE_PARM_DESC(smbd_logging_class, | ||
133 | "Logging class for SMBD transport 0x0 to 0x100"); | ||
134 | |||
135 | #define ERR 0x0 | ||
136 | #define INFO 0x1 | ||
137 | static unsigned int smbd_logging_level = ERR; | ||
138 | module_param(smbd_logging_level, uint, 0644); | ||
139 | MODULE_PARM_DESC(smbd_logging_level, | ||
140 | "Logging level for SMBD transport, 0 (default): error, 1: info"); | ||
141 | |||
142 | #define log_rdma(level, class, fmt, args...) \ | ||
143 | do { \ | ||
144 | if (level <= smbd_logging_level || class & smbd_logging_class) \ | ||
145 | cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\ | ||
146 | } while (0) | ||
147 | |||
148 | #define log_outgoing(level, fmt, args...) \ | ||
149 | log_rdma(level, LOG_OUTGOING, fmt, ##args) | ||
150 | #define log_incoming(level, fmt, args...) \ | ||
151 | log_rdma(level, LOG_INCOMING, fmt, ##args) | ||
152 | #define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args) | ||
153 | #define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args) | ||
154 | #define log_rdma_send(level, fmt, args...) \ | ||
155 | log_rdma(level, LOG_RDMA_SEND, fmt, ##args) | ||
156 | #define log_rdma_recv(level, fmt, args...) \ | ||
157 | log_rdma(level, LOG_RDMA_RECV, fmt, ##args) | ||
158 | #define log_keep_alive(level, fmt, args...) \ | ||
159 | log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args) | ||
160 | #define log_rdma_event(level, fmt, args...) \ | ||
161 | log_rdma(level, LOG_RDMA_EVENT, fmt, ##args) | ||
162 | #define log_rdma_mr(level, fmt, args...) \ | ||
163 | log_rdma(level, LOG_RDMA_MR, fmt, ##args) | ||
164 | |||
165 | /* | ||
166 | * Destroy the transport and related RDMA and memory resources | ||
167 | * Need to go through all the pending counters and make sure on one is using | ||
168 | * the transport while it is destroyed | ||
169 | */ | ||
170 | static void smbd_destroy_rdma_work(struct work_struct *work) | ||
171 | { | ||
172 | struct smbd_response *response; | ||
173 | struct smbd_connection *info = | ||
174 | container_of(work, struct smbd_connection, destroy_work); | ||
175 | unsigned long flags; | ||
176 | |||
177 | log_rdma_event(INFO, "destroying qp\n"); | ||
178 | ib_drain_qp(info->id->qp); | ||
179 | rdma_destroy_qp(info->id); | ||
180 | |||
181 | /* Unblock all I/O waiting on the send queue */ | ||
182 | wake_up_interruptible_all(&info->wait_send_queue); | ||
183 | |||
184 | log_rdma_event(INFO, "cancelling idle timer\n"); | ||
185 | cancel_delayed_work_sync(&info->idle_timer_work); | ||
186 | log_rdma_event(INFO, "cancelling send immediate work\n"); | ||
187 | cancel_delayed_work_sync(&info->send_immediate_work); | ||
188 | |||
189 | log_rdma_event(INFO, "wait for all send to finish\n"); | ||
190 | wait_event(info->wait_smbd_send_pending, | ||
191 | info->smbd_send_pending == 0); | ||
192 | |||
193 | log_rdma_event(INFO, "wait for all recv to finish\n"); | ||
194 | wake_up_interruptible(&info->wait_reassembly_queue); | ||
195 | wait_event(info->wait_smbd_recv_pending, | ||
196 | info->smbd_recv_pending == 0); | ||
197 | |||
198 | log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); | ||
199 | wait_event(info->wait_send_pending, | ||
200 | atomic_read(&info->send_pending) == 0); | ||
201 | wait_event(info->wait_send_payload_pending, | ||
202 | atomic_read(&info->send_payload_pending) == 0); | ||
203 | |||
204 | log_rdma_event(INFO, "freeing mr list\n"); | ||
205 | wake_up_interruptible_all(&info->wait_mr); | ||
206 | wait_event(info->wait_for_mr_cleanup, | ||
207 | atomic_read(&info->mr_used_count) == 0); | ||
208 | destroy_mr_list(info); | ||
209 | |||
210 | /* It's not posssible for upper layer to get to reassembly */ | ||
211 | log_rdma_event(INFO, "drain the reassembly queue\n"); | ||
212 | do { | ||
213 | spin_lock_irqsave(&info->reassembly_queue_lock, flags); | ||
214 | response = _get_first_reassembly(info); | ||
215 | if (response) { | ||
216 | list_del(&response->list); | ||
217 | spin_unlock_irqrestore( | ||
218 | &info->reassembly_queue_lock, flags); | ||
219 | put_receive_buffer(info, response); | ||
220 | } | ||
221 | } while (response); | ||
222 | spin_unlock_irqrestore(&info->reassembly_queue_lock, flags); | ||
223 | info->reassembly_data_length = 0; | ||
224 | |||
225 | log_rdma_event(INFO, "free receive buffers\n"); | ||
226 | wait_event(info->wait_receive_queues, | ||
227 | info->count_receive_queue + info->count_empty_packet_queue | ||
228 | == info->receive_credit_max); | ||
229 | destroy_receive_buffers(info); | ||
230 | |||
231 | ib_free_cq(info->send_cq); | ||
232 | ib_free_cq(info->recv_cq); | ||
233 | ib_dealloc_pd(info->pd); | ||
234 | rdma_destroy_id(info->id); | ||
235 | |||
236 | /* free mempools */ | ||
237 | mempool_destroy(info->request_mempool); | ||
238 | kmem_cache_destroy(info->request_cache); | ||
239 | |||
240 | mempool_destroy(info->response_mempool); | ||
241 | kmem_cache_destroy(info->response_cache); | ||
242 | |||
243 | info->transport_status = SMBD_DESTROYED; | ||
244 | wake_up_all(&info->wait_destroy); | ||
245 | } | ||
246 | |||
247 | static int smbd_process_disconnected(struct smbd_connection *info) | ||
248 | { | ||
249 | schedule_work(&info->destroy_work); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void smbd_disconnect_rdma_work(struct work_struct *work) | ||
254 | { | ||
255 | struct smbd_connection *info = | ||
256 | container_of(work, struct smbd_connection, disconnect_work); | ||
257 | |||
258 | if (info->transport_status == SMBD_CONNECTED) { | ||
259 | info->transport_status = SMBD_DISCONNECTING; | ||
260 | rdma_disconnect(info->id); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | static void smbd_disconnect_rdma_connection(struct smbd_connection *info) | ||
265 | { | ||
266 | queue_work(info->workqueue, &info->disconnect_work); | ||
267 | } | ||
268 | |||
269 | /* Upcall from RDMA CM */ | ||
270 | static int smbd_conn_upcall( | ||
271 | struct rdma_cm_id *id, struct rdma_cm_event *event) | ||
272 | { | ||
273 | struct smbd_connection *info = id->context; | ||
274 | |||
275 | log_rdma_event(INFO, "event=%d status=%d\n", | ||
276 | event->event, event->status); | ||
277 | |||
278 | switch (event->event) { | ||
279 | case RDMA_CM_EVENT_ADDR_RESOLVED: | ||
280 | case RDMA_CM_EVENT_ROUTE_RESOLVED: | ||
281 | info->ri_rc = 0; | ||
282 | complete(&info->ri_done); | ||
283 | break; | ||
284 | |||
285 | case RDMA_CM_EVENT_ADDR_ERROR: | ||
286 | info->ri_rc = -EHOSTUNREACH; | ||
287 | complete(&info->ri_done); | ||
288 | break; | ||
289 | |||
290 | case RDMA_CM_EVENT_ROUTE_ERROR: | ||
291 | info->ri_rc = -ENETUNREACH; | ||
292 | complete(&info->ri_done); | ||
293 | break; | ||
294 | |||
295 | case RDMA_CM_EVENT_ESTABLISHED: | ||
296 | log_rdma_event(INFO, "connected event=%d\n", event->event); | ||
297 | info->transport_status = SMBD_CONNECTED; | ||
298 | wake_up_interruptible(&info->conn_wait); | ||
299 | break; | ||
300 | |||
301 | case RDMA_CM_EVENT_CONNECT_ERROR: | ||
302 | case RDMA_CM_EVENT_UNREACHABLE: | ||
303 | case RDMA_CM_EVENT_REJECTED: | ||
304 | log_rdma_event(INFO, "connecting failed event=%d\n", event->event); | ||
305 | info->transport_status = SMBD_DISCONNECTED; | ||
306 | wake_up_interruptible(&info->conn_wait); | ||
307 | break; | ||
308 | |||
309 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | ||
310 | case RDMA_CM_EVENT_DISCONNECTED: | ||
311 | /* This happenes when we fail the negotiation */ | ||
312 | if (info->transport_status == SMBD_NEGOTIATE_FAILED) { | ||
313 | info->transport_status = SMBD_DISCONNECTED; | ||
314 | wake_up(&info->conn_wait); | ||
315 | break; | ||
316 | } | ||
317 | |||
318 | info->transport_status = SMBD_DISCONNECTED; | ||
319 | smbd_process_disconnected(info); | ||
320 | break; | ||
321 | |||
322 | default: | ||
323 | break; | ||
324 | } | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | /* Upcall from RDMA QP */ | ||
330 | static void | ||
331 | smbd_qp_async_error_upcall(struct ib_event *event, void *context) | ||
332 | { | ||
333 | struct smbd_connection *info = context; | ||
334 | |||
335 | log_rdma_event(ERR, "%s on device %s info %p\n", | ||
336 | ib_event_msg(event->event), event->device->name, info); | ||
337 | |||
338 | switch (event->event) { | ||
339 | case IB_EVENT_CQ_ERR: | ||
340 | case IB_EVENT_QP_FATAL: | ||
341 | smbd_disconnect_rdma_connection(info); | ||
342 | |||
343 | default: | ||
344 | break; | ||
345 | } | ||
346 | } | ||
347 | |||
348 | static inline void *smbd_request_payload(struct smbd_request *request) | ||
349 | { | ||
350 | return (void *)request->packet; | ||
351 | } | ||
352 | |||
353 | static inline void *smbd_response_payload(struct smbd_response *response) | ||
354 | { | ||
355 | return (void *)response->packet; | ||
356 | } | ||
357 | |||
358 | /* Called when a RDMA send is done */ | ||
359 | static void send_done(struct ib_cq *cq, struct ib_wc *wc) | ||
360 | { | ||
361 | int i; | ||
362 | struct smbd_request *request = | ||
363 | container_of(wc->wr_cqe, struct smbd_request, cqe); | ||
364 | |||
365 | log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n", | ||
366 | request, wc->status); | ||
367 | |||
368 | if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { | ||
369 | log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", | ||
370 | wc->status, wc->opcode); | ||
371 | smbd_disconnect_rdma_connection(request->info); | ||
372 | } | ||
373 | |||
374 | for (i = 0; i < request->num_sge; i++) | ||
375 | ib_dma_unmap_single(request->info->id->device, | ||
376 | request->sge[i].addr, | ||
377 | request->sge[i].length, | ||
378 | DMA_TO_DEVICE); | ||
379 | |||
380 | if (request->has_payload) { | ||
381 | if (atomic_dec_and_test(&request->info->send_payload_pending)) | ||
382 | wake_up(&request->info->wait_send_payload_pending); | ||
383 | } else { | ||
384 | if (atomic_dec_and_test(&request->info->send_pending)) | ||
385 | wake_up(&request->info->wait_send_pending); | ||
386 | } | ||
387 | |||
388 | mempool_free(request, request->info->request_mempool); | ||
389 | } | ||
390 | |||
391 | static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp) | ||
392 | { | ||
393 | log_rdma_event(INFO, "resp message min_version %u max_version %u " | ||
394 | "negotiated_version %u credits_requested %u " | ||
395 | "credits_granted %u status %u max_readwrite_size %u " | ||
396 | "preferred_send_size %u max_receive_size %u " | ||
397 | "max_fragmented_size %u\n", | ||
398 | resp->min_version, resp->max_version, resp->negotiated_version, | ||
399 | resp->credits_requested, resp->credits_granted, resp->status, | ||
400 | resp->max_readwrite_size, resp->preferred_send_size, | ||
401 | resp->max_receive_size, resp->max_fragmented_size); | ||
402 | } | ||
403 | |||
/*
 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
 * response, packet_length: the negotiation response message
 * return value: true if negotiation is a success, false if failed
 */
static bool process_negotiation_response(
		struct smbd_response *response, int packet_length)
{
	struct smbd_connection *info = response->info;
	struct smbd_negotiate_resp *packet = smbd_response_payload(response);

	/* The response must hold at least the fixed-size negotiate packet */
	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
		log_rdma_event(ERR,
			"error: packet_length=%d\n", packet_length);
		return false;
	}

	/* Only SMBD protocol version 1 is supported by this implementation */
	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
		log_rdma_event(ERR, "error: negotiated_version=%x\n",
			le16_to_cpu(packet->negotiated_version));
		return false;
	}
	info->protocol = le16_to_cpu(packet->negotiated_version);

	/* The peer must ask for at least one receive credit */
	if (packet->credits_requested == 0) {
		log_rdma_event(ERR, "error: credits_requested==0\n");
		return false;
	}
	info->receive_credit_target = le16_to_cpu(packet->credits_requested);

	/* The peer must grant us at least one send credit to make progress */
	if (packet->credits_granted == 0) {
		log_rdma_event(ERR, "error: credits_granted==0\n");
		return false;
	}
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

	/* No receives posted yet on the data path; credits start at zero */
	atomic_set(&info->receive_credits, 0);

	/*
	 * The peer's preferred send size becomes our receive size; it may
	 * only shrink our pre-negotiated max_receive_size, never grow it.
	 */
	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
			le32_to_cpu(packet->preferred_send_size));
		return false;
	}
	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
		log_rdma_event(ERR, "error: max_receive_size=%d\n",
			le32_to_cpu(packet->max_receive_size));
		return false;
	}
	/* Never send more than the peer is willing to receive */
	info->max_send_size = min_t(int, info->max_send_size,
		le32_to_cpu(packet->max_receive_size));

	if (le32_to_cpu(packet->max_fragmented_size) <
		SMBD_MIN_FRAGMENTED_SIZE) {
		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
			le32_to_cpu(packet->max_fragmented_size));
		return false;
	}
	info->max_fragmented_send_size =
		le32_to_cpu(packet->max_fragmented_size);
	/* Clamp the RDMA read/write threshold to the fragmented send size */
	info->rdma_readwrite_threshold =
		rdma_readwrite_threshold > info->max_fragmented_send_size ?
		info->max_fragmented_send_size :
		rdma_readwrite_threshold;


	/*
	 * Cap the RDMA read/write size by what our fast-registration MRs
	 * can describe, then recompute the FRMR depth from the final size.
	 */
	info->max_readwrite_size = min_t(u32,
			le32_to_cpu(packet->max_readwrite_size),
			info->max_frmr_depth * PAGE_SIZE);
	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;

	return true;
}
478 | |||
479 | /* | ||
480 | * Check and schedule to send an immediate packet | ||
481 | * This is used to extend credtis to remote peer to keep the transport busy | ||
482 | */ | ||
483 | static void check_and_send_immediate(struct smbd_connection *info) | ||
484 | { | ||
485 | if (info->transport_status != SMBD_CONNECTED) | ||
486 | return; | ||
487 | |||
488 | info->send_immediate = true; | ||
489 | |||
490 | /* | ||
491 | * Promptly send a packet if our peer is running low on receive | ||
492 | * credits | ||
493 | */ | ||
494 | if (atomic_read(&info->receive_credits) < | ||
495 | info->receive_credit_target - 1) | ||
496 | queue_delayed_work( | ||
497 | info->workqueue, &info->send_immediate_work, 0); | ||
498 | } | ||
499 | |||
/*
 * Workqueue handler (info->post_send_credits_work): post as many receive
 * buffers as possible, then account the newly posted receives as credits
 * to offer to the remote peer.
 */
static void smbd_post_send_credits(struct work_struct *work)
{
	int ret = 0;	/* number of receives successfully posted */
	int use_receive_queue = 1;
	int rc;
	struct smbd_response *response;
	struct smbd_connection *info =
		container_of(work, struct smbd_connection,
			post_send_credits_work);

	/* If disconnected, wake up anyone waiting to drain the queues */
	if (info->transport_status != SMBD_CONNECTED) {
		wake_up(&info->wait_receive_queues);
		return;
	}

	if (info->receive_credit_target >
		atomic_read(&info->receive_credits)) {
		while (true) {
			/*
			 * Draw from the regular receive-buffer pool first,
			 * then fall back to recycled empty-packet buffers.
			 */
			if (use_receive_queue)
				response = get_receive_buffer(info);
			else
				response = get_empty_queue_buffer(info);
			if (!response) {
				/* now switch to empty packet queue */
				if (use_receive_queue) {
					use_receive_queue = 0;
					continue;
				} else
					break;
			}

			response->type = SMBD_TRANSFER_DATA;
			response->first_segment = false;
			rc = smbd_post_recv(info, response);
			if (rc) {
				log_rdma_recv(ERR,
					"post_recv failed rc=%d\n", rc);
				put_receive_buffer(info, response);
				break;
			}

			ret++;
		}
	}

	/* Record the new credits so the next send can grant them */
	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);

	atomic_add(ret, &info->receive_credits);

	/* Check if we can post new receive and grant credits to peer */
	check_and_send_immediate(info);
}
554 | |||
/*
 * Workqueue handler (info->recv_done_work): follow-up processing after a
 * data packet was received in the recv_done() completion path.
 */
static void smbd_recv_done_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, recv_done_work);

	/*
	 * We may have new send credits granted from the remote peer.
	 * If any sender is blocked on lack of credits, unblock it.
	 */
	if (atomic_read(&info->send_credits))
		wake_up_interruptible(&info->wait_send_queue);

	/*
	 * Check if we need to send something to remote peer to
	 * grant more credits or respond to KEEP_ALIVE packet
	 */
	check_and_send_immediate(info);
}
573 | |||
/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_data_transfer *data_transfer;
	struct smbd_response *response =
		container_of(wc->wr_cqe, struct smbd_response, cqe);
	struct smbd_connection *info = response->info;
	int data_length = 0;

	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
		      "byte_len=%d pkey_index=%x\n",
		response, response->type, wc->status, wc->opcode,
		wc->byte_len, wc->pkey_index);

	/* Any failed or unexpected completion tears the connection down */
	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(info);
		goto error;
	}

	/* Make the DMA'd receive buffer visible to the CPU before parsing */
	ib_dma_sync_single_for_cpu(
		wc->qp->device,
		response->sge.addr,
		response->sge.length,
		DMA_FROM_DEVICE);

	switch (response->type) {
	/* SMBD negotiation response */
	case SMBD_NEGOTIATE_RESP:
		dump_smbd_negotiate_resp(smbd_response_payload(response));
		info->full_packet_received = true;
		info->negotiate_done =
			process_negotiation_response(response, wc->byte_len);
		complete(&info->negotiate_completion);
		break;

	/* SMBD data transfer packet */
	case SMBD_TRANSFER_DATA:
		data_transfer = smbd_response_payload(response);
		data_length = le32_to_cpu(data_transfer->data_length);

		/*
		 * If this is a packet with data payload place the data in
		 * reassembly queue and wake up the reading thread
		 */
		if (data_length) {
			/*
			 * first_segment marks the start of a (possibly
			 * fragmented) upper-layer SMB packet.
			 */
			if (info->full_packet_received)
				response->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				info->full_packet_received = false;
			else
				info->full_packet_received = true;

			enqueue_reassembly(
				info,
				response,
				data_length);
		} else
			/* Credit-only packet: recycle its buffer */
			put_empty_packet(info, response);

		if (data_length)
			wake_up_interruptible(&info->wait_reassembly_queue);

		/* Update credit accounting from the packet header */
		atomic_dec(&info->receive_credits);
		info->receive_credit_target =
			le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			&info->send_credits);

		log_incoming(INFO, "data flags %d data_offset %d "
			"data_length %d remaining_data_length %d\n",
			le16_to_cpu(data_transfer->flags),
			le32_to_cpu(data_transfer->data_offset),
			le32_to_cpu(data_transfer->data_length),
			le32_to_cpu(data_transfer->remaining_data_length));

		/* Send a KEEP_ALIVE response right away if requested */
		info->keep_alive_requested = KEEP_ALIVE_NONE;
		if (le16_to_cpu(data_transfer->flags) &
				SMB_DIRECT_RESPONSE_REQUESTED) {
			info->keep_alive_requested = KEEP_ALIVE_PENDING;
		}

		/* Defer wakeups/credit extension to process context */
		queue_work(info->workqueue, &info->recv_done_work);
		return;

	default:
		log_rdma_recv(ERR,
			"unexpected response type=%d\n", response->type);
	}

error:
	/* Buffer was not handed to the reassembly queue; return it */
	put_receive_buffer(info, response);
}
670 | |||
/*
 * Create an RDMA connection-manager id bound to dstaddr:port and resolve
 * both the address and the route for it.
 * Returns the new rdma_cm_id on success, an ERR_PTR on failure.
 * The completion info->ri_done and result info->ri_rc are filled in by
 * smbd_conn_upcall when the CM events arrive.
 */
static struct rdma_cm_id *smbd_create_id(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	struct rdma_cm_id *id;
	int rc;
	__be16 *sport;

	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
		RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
		return id;
	}

	/* Patch the requested port into the destination sockaddr */
	if (dstaddr->sa_family == AF_INET6)
		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
	else
		sport = &((struct sockaddr_in *)dstaddr)->sin_port;

	*sport = htons(port);

	init_completion(&info->ri_done);
	/* Pre-set to -ETIMEDOUT; the upcall overwrites it on completion */
	info->ri_rc = -ETIMEDOUT;

	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
		RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
		goto out;
	}
	/*
	 * NOTE(review): the return value of the interruptible wait is
	 * discarded, so a signal arriving here is indistinguishable from a
	 * timeout (ri_rc stays -ETIMEDOUT) — confirm this is intended.
	 */
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
		goto out;
	}

	info->ri_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
		goto out;
	}
	/* Same signal-vs-timeout caveat as the address-resolve wait above */
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
		goto out;
	}

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
731 | |||
732 | /* | ||
733 | * Test if FRWR (Fast Registration Work Requests) is supported on the device | ||
734 | * This implementation requries FRWR on RDMA read/write | ||
735 | * return value: true if it is supported | ||
736 | */ | ||
737 | static bool frwr_is_supported(struct ib_device_attr *attrs) | ||
738 | { | ||
739 | if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) | ||
740 | return false; | ||
741 | if (attrs->max_fast_reg_page_list_len == 0) | ||
742 | return false; | ||
743 | return true; | ||
744 | } | ||
745 | |||
/*
 * Open the "interface adapter": create the rdma_cm_id for dstaddr:port,
 * verify FRWR support, pick the MR type and FRMR depth, and allocate the
 * protection domain.
 * Fills in info->id, info->pd, info->max_frmr_depth, info->mr_type.
 * return value: 0 on success, negative errno on failure.
 */
static int smbd_ia_open(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	int rc;

	info->id = smbd_create_id(info, dstaddr, port);
	if (IS_ERR(info->id)) {
		rc = PTR_ERR(info->id);
		goto out1;
	}

	/* This transport depends on FRWR for RDMA read/write */
	if (!frwr_is_supported(&info->id->device->attrs)) {
		log_rdma_event(ERR,
			"Fast Registration Work Requests "
			"(FRWR) is not supported\n");
		log_rdma_event(ERR,
			"Device capability flags = %llx "
			"max_fast_reg_page_list_len = %u\n",
			info->id->device->attrs.device_cap_flags,
			info->id->device->attrs.max_fast_reg_page_list_len);
		rc = -EPROTONOSUPPORT;
		goto out2;
	}
	/* Cap our FRMR depth by what the device can register */
	info->max_frmr_depth = min_t(int,
		smbd_max_frmr_depth,
		info->id->device->attrs.max_fast_reg_page_list_len);
	info->mr_type = IB_MR_TYPE_MEM_REG;
	/* Prefer SG_GAPS MRs when the device can register gapped SG lists */
	if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		info->mr_type = IB_MR_TYPE_SG_GAPS;

	info->pd = ib_alloc_pd(info->id->device, 0);
	if (IS_ERR(info->pd)) {
		rc = PTR_ERR(info->pd);
		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	return 0;

out2:
	rdma_destroy_id(info->id);
	info->id = NULL;

out1:
	return rc;
}
793 | |||
/*
 * Send a negotiation request message to the peer
 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
 * After negotiation, the transport is connected and ready for
 * carrying upper layer SMB payload
 */
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	int rc = -ENOMEM;
	struct smbd_request *request;
	struct smbd_negotiate_req *packet;

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request)
		return rc;

	request->info = info;

	/* Build the negotiate request per [MS-SMBD] 2.2.1 */
	packet = smbd_request_payload(request);
	packet->min_version = cpu_to_le16(SMBD_V1);
	packet->max_version = cpu_to_le16(SMBD_V1);
	packet->reserved = 0;
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
	packet->max_fragmented_size =
		cpu_to_le32(info->max_fragmented_recv_size);

	/* The request is a single SGE covering the packet */
	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(
				info->id->device, (void *)packet,
				sizeof(*packet), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		goto dma_mapping_failed;
	}

	request->sge[0].length = sizeof(*packet);
	request->sge[0].lkey = info->pd->local_dma_lkey;

	/* Flush CPU writes to the packet before the HCA reads it */
	ib_dma_sync_single_for_device(
		info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
		request->sge[0].addr,
		request->sge[0].length, request->sge[0].lkey);

	/*
	 * Account the pending send before posting; send_done drops it.
	 * The negotiate request carries no upper-layer payload.
	 */
	request->has_payload = false;
	atomic_inc(&info->send_pending);
	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
	if (!rc)
		return 0;

	/* if we reach here, post send failed */
	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
	atomic_dec(&info->send_pending);
	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

dma_mapping_failed:
	mempool_free(request, info->request_mempool);
	return rc;
}
868 | |||
869 | /* | ||
870 | * Extend the credits to remote peer | ||
871 | * This implements [MS-SMBD] 3.1.5.9 | ||
872 | * The idea is that we should extend credits to remote peer as quickly as | ||
873 | * it's allowed, to maintain data flow. We allocate as much receive | ||
874 | * buffer as possible, and extend the receive credits to remote peer | ||
875 | * return value: the new credtis being granted. | ||
876 | */ | ||
877 | static int manage_credits_prior_sending(struct smbd_connection *info) | ||
878 | { | ||
879 | int new_credits; | ||
880 | |||
881 | spin_lock(&info->lock_new_credits_offered); | ||
882 | new_credits = info->new_credits_offered; | ||
883 | info->new_credits_offered = 0; | ||
884 | spin_unlock(&info->lock_new_credits_offered); | ||
885 | |||
886 | return new_credits; | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * Check if we need to send a KEEP_ALIVE message | ||
891 | * The idle connection timer triggers a KEEP_ALIVE message when expires | ||
892 | * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send | ||
893 | * back a response. | ||
894 | * return value: | ||
895 | * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set | ||
896 | * 0: otherwise | ||
897 | */ | ||
898 | static int manage_keep_alive_before_sending(struct smbd_connection *info) | ||
899 | { | ||
900 | if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { | ||
901 | info->keep_alive_requested = KEEP_ALIVE_SENT; | ||
902 | return 1; | ||
903 | } | ||
904 | return 0; | ||
905 | } | ||
906 | |||
907 | /* | ||
908 | * Build and prepare the SMBD packet header | ||
909 | * This function waits for avaialbe send credits and build a SMBD packet | ||
910 | * header. The caller then optional append payload to the packet after | ||
911 | * the header | ||
912 | * intput values | ||
913 | * size: the size of the payload | ||
914 | * remaining_data_length: remaining data to send if this is part of a | ||
915 | * fragmented packet | ||
916 | * output values | ||
917 | * request_out: the request allocated from this function | ||
918 | * return values: 0 on success, otherwise actual error code returned | ||
919 | */ | ||
920 | static int smbd_create_header(struct smbd_connection *info, | ||
921 | int size, int remaining_data_length, | ||
922 | struct smbd_request **request_out) | ||
923 | { | ||
924 | struct smbd_request *request; | ||
925 | struct smbd_data_transfer *packet; | ||
926 | int header_length; | ||
927 | int rc; | ||
928 | |||
929 | /* Wait for send credits. A SMBD packet needs one credit */ | ||
930 | rc = wait_event_interruptible(info->wait_send_queue, | ||
931 | atomic_read(&info->send_credits) > 0 || | ||
932 | info->transport_status != SMBD_CONNECTED); | ||
933 | if (rc) | ||
934 | return rc; | ||
935 | |||
936 | if (info->transport_status != SMBD_CONNECTED) { | ||
937 | log_outgoing(ERR, "disconnected not sending\n"); | ||
938 | return -ENOENT; | ||
939 | } | ||
940 | atomic_dec(&info->send_credits); | ||
941 | |||
942 | request = mempool_alloc(info->request_mempool, GFP_KERNEL); | ||
943 | if (!request) { | ||
944 | rc = -ENOMEM; | ||
945 | goto err; | ||
946 | } | ||
947 | |||
948 | request->info = info; | ||
949 | |||
950 | /* Fill in the packet header */ | ||
951 | packet = smbd_request_payload(request); | ||
952 | packet->credits_requested = cpu_to_le16(info->send_credit_target); | ||
953 | packet->credits_granted = | ||
954 | cpu_to_le16(manage_credits_prior_sending(info)); | ||
955 | info->send_immediate = false; | ||
956 | |||
957 | packet->flags = 0; | ||
958 | if (manage_keep_alive_before_sending(info)) | ||
959 | packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); | ||
960 | |||
961 | packet->reserved = 0; | ||
962 | if (!size) | ||
963 | packet->data_offset = 0; | ||
964 | else | ||
965 | packet->data_offset = cpu_to_le32(24); | ||
966 | packet->data_length = cpu_to_le32(size); | ||
967 | packet->remaining_data_length = cpu_to_le32(remaining_data_length); | ||
968 | packet->padding = 0; | ||
969 | |||
970 | log_outgoing(INFO, "credits_requested=%d credits_granted=%d " | ||
971 | "data_offset=%d data_length=%d remaining_data_length=%d\n", | ||
972 | le16_to_cpu(packet->credits_requested), | ||
973 | le16_to_cpu(packet->credits_granted), | ||
974 | le32_to_cpu(packet->data_offset), | ||
975 | le32_to_cpu(packet->data_length), | ||
976 | le32_to_cpu(packet->remaining_data_length)); | ||
977 | |||
978 | /* Map the packet to DMA */ | ||
979 | header_length = sizeof(struct smbd_data_transfer); | ||
980 | /* If this is a packet without payload, don't send padding */ | ||
981 | if (!size) | ||
982 | header_length = offsetof(struct smbd_data_transfer, padding); | ||
983 | |||
984 | request->num_sge = 1; | ||
985 | request->sge[0].addr = ib_dma_map_single(info->id->device, | ||
986 | (void *)packet, | ||
987 | header_length, | ||
988 | DMA_BIDIRECTIONAL); | ||
989 | if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { | ||
990 | mempool_free(request, info->request_mempool); | ||
991 | rc = -EIO; | ||
992 | goto err; | ||
993 | } | ||
994 | |||
995 | request->sge[0].length = header_length; | ||
996 | request->sge[0].lkey = info->pd->local_dma_lkey; | ||
997 | |||
998 | *request_out = request; | ||
999 | return 0; | ||
1000 | |||
1001 | err: | ||
1002 | atomic_inc(&info->send_credits); | ||
1003 | return rc; | ||
1004 | } | ||
1005 | |||
/*
 * Undo smbd_create_header: unmap the header DMA mapping, free the request
 * back to its mempool and return the send credit that was consumed.
 * NOTE(review): the unmap direction here must match the direction used at
 * map time in smbd_create_header — verify they agree.
 */
static void smbd_destroy_header(struct smbd_connection *info,
		struct smbd_request *request)
{

	ib_dma_unmap_single(info->id->device,
			    request->sge[0].addr,
			    request->sge[0].length,
			    DMA_TO_DEVICE);
	mempool_free(request, info->request_mempool);
	atomic_inc(&info->send_credits);
}
1017 | |||
1018 | /* Post the send request */ | ||
1019 | static int smbd_post_send(struct smbd_connection *info, | ||
1020 | struct smbd_request *request, bool has_payload) | ||
1021 | { | ||
1022 | struct ib_send_wr send_wr, *send_wr_fail; | ||
1023 | int rc, i; | ||
1024 | |||
1025 | for (i = 0; i < request->num_sge; i++) { | ||
1026 | log_rdma_send(INFO, | ||
1027 | "rdma_request sge[%d] addr=%llu legnth=%u\n", | ||
1028 | i, request->sge[0].addr, request->sge[0].length); | ||
1029 | ib_dma_sync_single_for_device( | ||
1030 | info->id->device, | ||
1031 | request->sge[i].addr, | ||
1032 | request->sge[i].length, | ||
1033 | DMA_TO_DEVICE); | ||
1034 | } | ||
1035 | |||
1036 | request->cqe.done = send_done; | ||
1037 | |||
1038 | send_wr.next = NULL; | ||
1039 | send_wr.wr_cqe = &request->cqe; | ||
1040 | send_wr.sg_list = request->sge; | ||
1041 | send_wr.num_sge = request->num_sge; | ||
1042 | send_wr.opcode = IB_WR_SEND; | ||
1043 | send_wr.send_flags = IB_SEND_SIGNALED; | ||
1044 | |||
1045 | if (has_payload) { | ||
1046 | request->has_payload = true; | ||
1047 | atomic_inc(&info->send_payload_pending); | ||
1048 | } else { | ||
1049 | request->has_payload = false; | ||
1050 | atomic_inc(&info->send_pending); | ||
1051 | } | ||
1052 | |||
1053 | rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail); | ||
1054 | if (rc) { | ||
1055 | log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc); | ||
1056 | if (has_payload) { | ||
1057 | if (atomic_dec_and_test(&info->send_payload_pending)) | ||
1058 | wake_up(&info->wait_send_payload_pending); | ||
1059 | } else { | ||
1060 | if (atomic_dec_and_test(&info->send_pending)) | ||
1061 | wake_up(&info->wait_send_pending); | ||
1062 | } | ||
1063 | } else | ||
1064 | /* Reset timer for idle connection after packet is sent */ | ||
1065 | mod_delayed_work(info->workqueue, &info->idle_timer_work, | ||
1066 | info->keep_alive_interval*HZ); | ||
1067 | |||
1068 | return rc; | ||
1069 | } | ||
1070 | |||
/*
 * Build and send one SMBD data packet whose payload is described by a
 * scatterlist (or no payload when sgl is NULL).
 * sgl: payload scatterlist, NULL for an empty packet
 * data_length: total payload bytes in sgl
 * remaining_data_length: bytes still to follow in a fragmented transfer
 * return value: 0 on success, negative errno on failure
 */
static int smbd_post_send_sgl(struct smbd_connection *info,
	struct scatterlist *sgl, int data_length, int remaining_data_length)
{
	int num_sgs;
	int i, rc;
	struct smbd_request *request;
	struct scatterlist *sg;

	/* Allocate the request and SMBD header in sge[0] (sets num_sge=1) */
	rc = smbd_create_header(
		info, data_length, remaining_data_length, &request);
	if (rc)
		return rc;

	/* Map each payload segment into sge[1..] after the header */
	num_sgs = sgl ? sg_nents(sgl) : 0;
	for_each_sg(sgl, sg, num_sgs, i) {
		request->sge[i+1].addr =
			ib_dma_map_page(info->id->device, sg_page(sg),
			       sg->offset, sg->length, DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(
				info->id->device, request->sge[i+1].addr)) {
			rc = -EIO;
			/* Mark unmapped so the cleanup loop skips it */
			request->sge[i+1].addr = 0;
			goto dma_mapping_failure;
		}
		request->sge[i+1].length = sg->length;
		request->sge[i+1].lkey = info->pd->local_dma_lkey;
		request->num_sge++;
	}

	/* Nonzero data_length doubles as the has_payload flag */
	rc = smbd_post_send(info, request, data_length);
	if (!rc)
		return 0;

dma_mapping_failure:
	/*
	 * NOTE(review): payload SGEs are mapped with ib_dma_map_page but
	 * released here with ib_dma_unmap_single, and with a different
	 * direction than at map time — confirm against the DMA API rules.
	 */
	for (i = 1; i < request->num_sge; i++)
		if (request->sge[i].addr)
			ib_dma_unmap_single(info->id->device,
					    request->sge[i].addr,
					    request->sge[i].length,
					    DMA_TO_DEVICE);
	smbd_destroy_header(info, request);
	return rc;
}
1114 | |||
1115 | /* | ||
1116 | * Send a page | ||
1117 | * page: the page to send | ||
1118 | * offset: offset in the page to send | ||
1119 | * size: length in the page to send | ||
1120 | * remaining_data_length: remaining data to send in this payload | ||
1121 | */ | ||
1122 | static int smbd_post_send_page(struct smbd_connection *info, struct page *page, | ||
1123 | unsigned long offset, size_t size, int remaining_data_length) | ||
1124 | { | ||
1125 | struct scatterlist sgl; | ||
1126 | |||
1127 | sg_init_table(&sgl, 1); | ||
1128 | sg_set_page(&sgl, page, size, offset); | ||
1129 | |||
1130 | return smbd_post_send_sgl(info, &sgl, size, remaining_data_length); | ||
1131 | } | ||
1132 | |||
/*
 * Send an empty message
 * An empty message is used to extend credits to the peer and for
 * keep-alive, while there is no upper layer payload to send at the time
 */
static int smbd_post_send_empty(struct smbd_connection *info)
{
	/* Debug counter: number of empty (credit-only) packets sent */
	info->count_send_empty++;
	return smbd_post_send_sgl(info, NULL, 0, 0);
}
1143 | |||
1144 | /* | ||
1145 | * Send a data buffer | ||
1146 | * iov: the iov array describing the data buffers | ||
1147 | * n_vec: number of iov array | ||
1148 | * remaining_data_length: remaining data to send following this packet | ||
1149 | * in segmented SMBD packet | ||
1150 | */ | ||
1151 | static int smbd_post_send_data( | ||
1152 | struct smbd_connection *info, struct kvec *iov, int n_vec, | ||
1153 | int remaining_data_length) | ||
1154 | { | ||
1155 | int i; | ||
1156 | u32 data_length = 0; | ||
1157 | struct scatterlist sgl[SMBDIRECT_MAX_SGE]; | ||
1158 | |||
1159 | if (n_vec > SMBDIRECT_MAX_SGE) { | ||
1160 | cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec); | ||
1161 | return -ENOMEM; | ||
1162 | } | ||
1163 | |||
1164 | sg_init_table(sgl, n_vec); | ||
1165 | for (i = 0; i < n_vec; i++) { | ||
1166 | data_length += iov[i].iov_len; | ||
1167 | sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len); | ||
1168 | } | ||
1169 | |||
1170 | return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length); | ||
1171 | } | ||
1172 | |||
1173 | /* | ||
1174 | * Post a receive request to the transport | ||
1175 | * The remote peer can only send data when a receive request is posted | ||
1176 | * The interaction is controlled by send/receive credit system | ||
1177 | */ | ||
1178 | static int smbd_post_recv( | ||
1179 | struct smbd_connection *info, struct smbd_response *response) | ||
1180 | { | ||
1181 | struct ib_recv_wr recv_wr, *recv_wr_fail = NULL; | ||
1182 | int rc = -EIO; | ||
1183 | |||
1184 | response->sge.addr = ib_dma_map_single( | ||
1185 | info->id->device, response->packet, | ||
1186 | info->max_receive_size, DMA_FROM_DEVICE); | ||
1187 | if (ib_dma_mapping_error(info->id->device, response->sge.addr)) | ||
1188 | return rc; | ||
1189 | |||
1190 | response->sge.length = info->max_receive_size; | ||
1191 | response->sge.lkey = info->pd->local_dma_lkey; | ||
1192 | |||
1193 | response->cqe.done = recv_done; | ||
1194 | |||
1195 | recv_wr.wr_cqe = &response->cqe; | ||
1196 | recv_wr.next = NULL; | ||
1197 | recv_wr.sg_list = &response->sge; | ||
1198 | recv_wr.num_sge = 1; | ||
1199 | |||
1200 | rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail); | ||
1201 | if (rc) { | ||
1202 | ib_dma_unmap_single(info->id->device, response->sge.addr, | ||
1203 | response->sge.length, DMA_FROM_DEVICE); | ||
1204 | |||
1205 | log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc); | ||
1206 | } | ||
1207 | |||
1208 | return rc; | ||
1209 | } | ||
1210 | |||
/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
static int smbd_negotiate(struct smbd_connection *info)
{
	int rc;
	/*
	 * NOTE(review): get_receive_buffer can return NULL when the pool is
	 * exhausted; at negotiate time buffers were just allocated, but the
	 * result is not checked — confirm this cannot fail here.
	 */
	struct smbd_response *response = get_receive_buffer(info);

	/* Post the receive for the negotiate response before sending */
	response->type = SMBD_NEGOTIATE_RESP;
	rc = smbd_post_recv(info, response);
	log_rdma_event(INFO,
		"smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
		"iov.lkey=%x\n",
		rc, response->sge.addr,
		response->sge.length, response->sge.lkey);
	if (rc)
		return rc;

	init_completion(&info->negotiate_completion);
	info->negotiate_done = false;
	rc = smbd_post_send_negotiate_req(info);
	if (rc)
		return rc;

	/* recv_done sets negotiate_done and completes negotiate_completion */
	rc = wait_for_completion_interruptible_timeout(
		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);

	if (info->negotiate_done)
		return 0;

	/* Map the wait result to an errno for the caller */
	if (rc == 0)
		rc = -ETIMEDOUT;
	else if (rc == -ERESTARTSYS)
		rc = -EINTR;
	else
		rc = -ENOTCONN;

	return rc;
}
1249 | |||
/*
 * Return a credit-only (no payload) receive buffer to the empty-packet
 * queue, then kick the worker that reposts receives and grants credits.
 * NOTE(review): this takes the plain spin_lock while
 * get_empty_queue_buffer uses spin_lock_irqsave — consistent only if
 * this function is always called from softirq/recv_done context; verify.
 */
static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response)
{
	spin_lock(&info->empty_packet_queue_lock);
	list_add_tail(&response->list, &info->empty_packet_queue);
	info->count_empty_packet_queue++;
	spin_unlock(&info->empty_packet_queue_lock);

	queue_work(info->workqueue, &info->post_send_credits_work);
}
1260 | |||
/*
 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
 * This is a queue for reassembling upper layer payload and presenting to
 * upper layer. All the incoming payload goes to the reassembly queue,
 * regardless of if reassembly is required. The upper layer code reads from
 * the queue for all incoming payloads.
 * Put a received packet to the reassembly queue
 * response: the packet received
 * data_length: the size of payload in this packet
 */
static void enqueue_reassembly(
	struct smbd_connection *info,
	struct smbd_response *response,
	int data_length)
{
	spin_lock(&info->reassembly_queue_lock);
	list_add_tail(&response->list, &info->reassembly_queue);
	info->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and list is up to date
	 */
	virt_wmb();
	info->reassembly_data_length += data_length;
	spin_unlock(&info->reassembly_queue_lock);
	/* Debug counters; not used for flow control */
	info->count_reassembly_queue++;
	info->count_enqueue_reassembly_queue++;
}
1291 | |||
1292 | /* | ||
1293 | * Get the first entry at the front of reassembly queue | ||
1294 | * Caller is responsible for locking | ||
1295 | * return value: the first entry if any, NULL if queue is empty | ||
1296 | */ | ||
1297 | static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) | ||
1298 | { | ||
1299 | struct smbd_response *ret = NULL; | ||
1300 | |||
1301 | if (!list_empty(&info->reassembly_queue)) { | ||
1302 | ret = list_first_entry( | ||
1303 | &info->reassembly_queue, | ||
1304 | struct smbd_response, list); | ||
1305 | } | ||
1306 | return ret; | ||
1307 | } | ||
1308 | |||
1309 | static struct smbd_response *get_empty_queue_buffer( | ||
1310 | struct smbd_connection *info) | ||
1311 | { | ||
1312 | struct smbd_response *ret = NULL; | ||
1313 | unsigned long flags; | ||
1314 | |||
1315 | spin_lock_irqsave(&info->empty_packet_queue_lock, flags); | ||
1316 | if (!list_empty(&info->empty_packet_queue)) { | ||
1317 | ret = list_first_entry( | ||
1318 | &info->empty_packet_queue, | ||
1319 | struct smbd_response, list); | ||
1320 | list_del(&ret->list); | ||
1321 | info->count_empty_packet_queue--; | ||
1322 | } | ||
1323 | spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); | ||
1324 | |||
1325 | return ret; | ||
1326 | } | ||
1327 | |||
1328 | /* | ||
1329 | * Get a receive buffer | ||
1330 | * For each remote send, we need to post a receive. The receive buffers are | ||
1331 | * pre-allocated in advance. | ||
1332 | * return value: the receive buffer, NULL if none is available | ||
1333 | */ | ||
1334 | static struct smbd_response *get_receive_buffer(struct smbd_connection *info) | ||
1335 | { | ||
1336 | struct smbd_response *ret = NULL; | ||
1337 | unsigned long flags; | ||
1338 | |||
1339 | spin_lock_irqsave(&info->receive_queue_lock, flags); | ||
1340 | if (!list_empty(&info->receive_queue)) { | ||
1341 | ret = list_first_entry( | ||
1342 | &info->receive_queue, | ||
1343 | struct smbd_response, list); | ||
1344 | list_del(&ret->list); | ||
1345 | info->count_receive_queue--; | ||
1346 | info->count_get_receive_buffer++; | ||
1347 | } | ||
1348 | spin_unlock_irqrestore(&info->receive_queue_lock, flags); | ||
1349 | |||
1350 | return ret; | ||
1351 | } | ||
1352 | |||
/*
 * Return a receive buffer
 * Upon returning of a receive buffer, we can post new receive and extend
 * more receive credits to remote peer. This is done immediately after a
 * receive buffer is returned.
 */
static void put_receive_buffer(
	struct smbd_connection *info, struct smbd_response *response)
{
	unsigned long flags;

	/* Buffer is no longer a receive target; tear down its DMA mapping */
	ib_dma_unmap_single(info->id->device, response->sge.addr,
		response->sge.length, DMA_FROM_DEVICE);

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	list_add_tail(&response->list, &info->receive_queue);
	info->count_receive_queue++;
	info->count_put_receive_buffer++;
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	/* Freed buffer may allow posting a receive / extending credits */
	queue_work(info->workqueue, &info->post_send_credits_work);
}
1375 | |||
/*
 * Preallocate all receive buffer on transport establishment
 * Also initializes the three buffer queues (reassembly, receive,
 * empty-packet) and their locks/counters.
 * num_buf: number of receive buffers to preallocate
 * return value: 0 on success, -ENOMEM if any allocation fails
 */
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
{
	int i;
	struct smbd_response *response;

	/* Reassembly queue: received payloads waiting for the upper layer */
	INIT_LIST_HEAD(&info->reassembly_queue);
	spin_lock_init(&info->reassembly_queue_lock);
	info->reassembly_data_length = 0;
	info->reassembly_queue_length = 0;

	/* Receive queue: free buffers available for posting receives */
	INIT_LIST_HEAD(&info->receive_queue);
	spin_lock_init(&info->receive_queue_lock);
	info->count_receive_queue = 0;

	/* Empty packet queue: buffers whose packet carried no payload */
	INIT_LIST_HEAD(&info->empty_packet_queue);
	spin_lock_init(&info->empty_packet_queue_lock);
	info->count_empty_packet_queue = 0;

	init_waitqueue_head(&info->wait_receive_queues);

	for (i = 0; i < num_buf; i++) {
		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
		if (!response)
			goto allocate_failed;

		response->info = info;
		list_add_tail(&response->list, &info->receive_queue);
		info->count_receive_queue++;
	}

	return 0;

allocate_failed:
	/* Undo the partial allocation: drain everything queued so far */
	while (!list_empty(&info->receive_queue)) {
		response = list_first_entry(
				&info->receive_queue,
				struct smbd_response, list);
		list_del(&response->list);
		info->count_receive_queue--;

		mempool_free(response, info->response_mempool);
	}
	return -ENOMEM;
}
1421 | |||
1422 | static void destroy_receive_buffers(struct smbd_connection *info) | ||
1423 | { | ||
1424 | struct smbd_response *response; | ||
1425 | |||
1426 | while ((response = get_receive_buffer(info))) | ||
1427 | mempool_free(response, info->response_mempool); | ||
1428 | |||
1429 | while ((response = get_empty_queue_buffer(info))) | ||
1430 | mempool_free(response, info->response_mempool); | ||
1431 | } | ||
1432 | |||
1433 | /* | ||
1434 | * Check and send an immediate or keep alive packet | ||
1435 | * The condition to send those packets are defined in [MS-SMBD] 3.1.1.1 | ||
1436 | * Connection.KeepaliveRequested and Connection.SendImmediate | ||
1437 | * The idea is to extend credits to server as soon as it becomes available | ||
1438 | */ | ||
1439 | static void send_immediate_work(struct work_struct *work) | ||
1440 | { | ||
1441 | struct smbd_connection *info = container_of( | ||
1442 | work, struct smbd_connection, | ||
1443 | send_immediate_work.work); | ||
1444 | |||
1445 | if (info->keep_alive_requested == KEEP_ALIVE_PENDING || | ||
1446 | info->send_immediate) { | ||
1447 | log_keep_alive(INFO, "send an empty message\n"); | ||
1448 | smbd_post_send_empty(info); | ||
1449 | } | ||
1450 | } | ||
1451 | |||
/*
 * Implement idle connection timer [MS-SMBD] 3.1.6.2
 * Fires every keep_alive_interval seconds. If a previous keep-alive request
 * is still outstanding, the peer failed to respond in time and the
 * connection is torn down; otherwise an empty (keep-alive) message is sent
 * and the timer is re-armed.
 */
static void idle_connection_timer(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					idle_timer_work.work);

	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
		/* Previous keep-alive never completed: treat as dead peer */
		log_keep_alive(ERR,
			"error status info->keep_alive_requested=%d\n",
			info->keep_alive_requested);
		smbd_disconnect_rdma_connection(info);
		return;
	}

	log_keep_alive(INFO, "about to send an empty idle message\n");
	smbd_post_send_empty(info);

	/* Setup the next idle timeout work */
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);
}
1474 | |||
/*
 * Destroy this SMBD connection, called from upper layer
 * Initiates the RDMA disconnect, blocks until the transport teardown work
 * has finished (transport_status == SMBD_DESTROYED), then frees the
 * workqueue and the connection itself.
 */
void smbd_destroy(struct smbd_connection *info)
{
	log_rdma_event(INFO, "destroying rdma session\n");

	/* Kick off the disconnection process */
	smbd_disconnect_rdma_connection(info);

	log_rdma_event(INFO, "wait for transport being destroyed\n");
	wait_event(info->wait_destroy,
		info->transport_status == SMBD_DESTROYED);

	/* All work items have completed; safe to free everything */
	destroy_workqueue(info->workqueue);
	kfree(info);
}
1490 | |||
/*
 * Reconnect this SMBD connection, called from upper layer
 * Tears down the existing transport (if any) and replaces server->smbd_conn
 * with a freshly negotiated connection.
 * NOTE(review): assumes the caller serializes reconnect attempts for this
 * server — smbd_conn is freed and reassigned without a lock here; confirm.
 * return value: 0 on success, or actual error code
 */
int smbd_reconnect(struct TCP_Server_Info *server)
{
	log_rdma_event(INFO, "reconnecting rdma session\n");

	if (!server->smbd_conn) {
		log_rdma_event(ERR, "rdma session already destroyed\n");
		return -EINVAL;
	}

	/*
	 * This is possible if transport is disconnected and we haven't received
	 * notification from RDMA, but upper layer has detected timeout
	 */
	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
		log_rdma_event(INFO, "disconnecting transport\n");
		smbd_disconnect_rdma_connection(server->smbd_conn);
	}

	/* wait until the transport is destroyed */
	wait_event(server->smbd_conn->wait_destroy,
		server->smbd_conn->transport_status == SMBD_DESTROYED);

	/* Release the old connection's resources before replacing it */
	destroy_workqueue(server->smbd_conn->workqueue);
	kfree(server->smbd_conn);

	log_rdma_event(INFO, "creating rdma session\n");
	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);

	return server->smbd_conn ? 0 : -ENOENT;
}
1526 | |||
/*
 * Free the per-connection caches and workqueue created by
 * allocate_caches_and_workqueue(). Receive buffers are returned to the
 * response mempool first, then pools are destroyed before their backing
 * slab caches.
 */
static void destroy_caches_and_workqueue(struct smbd_connection *info)
{
	destroy_receive_buffers(info);
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
}
1536 | |||
#define MAX_NAME_LEN 80
/*
 * Allocate the request/response slab caches, their mempools, the
 * per-connection workqueue, and the preallocated receive buffers.
 * Cache names embed the connection pointer so each connection's caches
 * are uniquely named. On failure everything allocated so far is unwound
 * in reverse order.
 * return value: 0 on success, -ENOMEM on any allocation failure
 */
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
	char name[MAX_NAME_LEN];
	int rc;

	/* Requests carry the SMBD data transfer header in the same object */
	snprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
	info->request_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_request) +
				sizeof(struct smbd_data_transfer),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	/* Responses are sized to hold a full inline receive */
	snprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
	info->response_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_response) +
				info->max_receive_size,
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
			mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	snprintf(name, MAX_NAME_LEN, "smbd_%p", info);
	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);
	if (rc) {
		log_rdma_event(ERR, "failed to allocate receive buffers\n");
		goto out5;
	}

	return 0;

	/* Unwind in exact reverse order of allocation */
out5:
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
	return -ENOMEM;
}
1600 | |||
1601 | /* Create a SMBD connection, called by upper layer */ | ||
1602 | static struct smbd_connection *_smbd_get_connection( | ||
1603 | struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port) | ||
1604 | { | ||
1605 | int rc; | ||
1606 | struct smbd_connection *info; | ||
1607 | struct rdma_conn_param conn_param; | ||
1608 | struct ib_qp_init_attr qp_attr; | ||
1609 | struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr; | ||
1610 | struct ib_port_immutable port_immutable; | ||
1611 | u32 ird_ord_hdr[2]; | ||
1612 | |||
1613 | info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL); | ||
1614 | if (!info) | ||
1615 | return NULL; | ||
1616 | |||
1617 | info->transport_status = SMBD_CONNECTING; | ||
1618 | rc = smbd_ia_open(info, dstaddr, port); | ||
1619 | if (rc) { | ||
1620 | log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc); | ||
1621 | goto create_id_failed; | ||
1622 | } | ||
1623 | |||
1624 | if (smbd_send_credit_target > info->id->device->attrs.max_cqe || | ||
1625 | smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { | ||
1626 | log_rdma_event(ERR, | ||
1627 | "consider lowering send_credit_target = %d. " | ||
1628 | "Possible CQE overrun, device " | ||
1629 | "reporting max_cpe %d max_qp_wr %d\n", | ||
1630 | smbd_send_credit_target, | ||
1631 | info->id->device->attrs.max_cqe, | ||
1632 | info->id->device->attrs.max_qp_wr); | ||
1633 | goto config_failed; | ||
1634 | } | ||
1635 | |||
1636 | if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || | ||
1637 | smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { | ||
1638 | log_rdma_event(ERR, | ||
1639 | "consider lowering receive_credit_max = %d. " | ||
1640 | "Possible CQE overrun, device " | ||
1641 | "reporting max_cpe %d max_qp_wr %d\n", | ||
1642 | smbd_receive_credit_max, | ||
1643 | info->id->device->attrs.max_cqe, | ||
1644 | info->id->device->attrs.max_qp_wr); | ||
1645 | goto config_failed; | ||
1646 | } | ||
1647 | |||
1648 | info->receive_credit_max = smbd_receive_credit_max; | ||
1649 | info->send_credit_target = smbd_send_credit_target; | ||
1650 | info->max_send_size = smbd_max_send_size; | ||
1651 | info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; | ||
1652 | info->max_receive_size = smbd_max_receive_size; | ||
1653 | info->keep_alive_interval = smbd_keep_alive_interval; | ||
1654 | |||
1655 | if (info->id->device->attrs.max_sge < SMBDIRECT_MAX_SGE) { | ||
1656 | log_rdma_event(ERR, "warning: device max_sge = %d too small\n", | ||
1657 | info->id->device->attrs.max_sge); | ||
1658 | log_rdma_event(ERR, "Queue Pair creation may fail\n"); | ||
1659 | } | ||
1660 | |||
1661 | info->send_cq = NULL; | ||
1662 | info->recv_cq = NULL; | ||
1663 | info->send_cq = ib_alloc_cq(info->id->device, info, | ||
1664 | info->send_credit_target, 0, IB_POLL_SOFTIRQ); | ||
1665 | if (IS_ERR(info->send_cq)) { | ||
1666 | info->send_cq = NULL; | ||
1667 | goto alloc_cq_failed; | ||
1668 | } | ||
1669 | |||
1670 | info->recv_cq = ib_alloc_cq(info->id->device, info, | ||
1671 | info->receive_credit_max, 0, IB_POLL_SOFTIRQ); | ||
1672 | if (IS_ERR(info->recv_cq)) { | ||
1673 | info->recv_cq = NULL; | ||
1674 | goto alloc_cq_failed; | ||
1675 | } | ||
1676 | |||
1677 | memset(&qp_attr, 0, sizeof(qp_attr)); | ||
1678 | qp_attr.event_handler = smbd_qp_async_error_upcall; | ||
1679 | qp_attr.qp_context = info; | ||
1680 | qp_attr.cap.max_send_wr = info->send_credit_target; | ||
1681 | qp_attr.cap.max_recv_wr = info->receive_credit_max; | ||
1682 | qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE; | ||
1683 | qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE; | ||
1684 | qp_attr.cap.max_inline_data = 0; | ||
1685 | qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; | ||
1686 | qp_attr.qp_type = IB_QPT_RC; | ||
1687 | qp_attr.send_cq = info->send_cq; | ||
1688 | qp_attr.recv_cq = info->recv_cq; | ||
1689 | qp_attr.port_num = ~0; | ||
1690 | |||
1691 | rc = rdma_create_qp(info->id, info->pd, &qp_attr); | ||
1692 | if (rc) { | ||
1693 | log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc); | ||
1694 | goto create_qp_failed; | ||
1695 | } | ||
1696 | |||
1697 | memset(&conn_param, 0, sizeof(conn_param)); | ||
1698 | conn_param.initiator_depth = 0; | ||
1699 | |||
1700 | conn_param.responder_resources = | ||
1701 | info->id->device->attrs.max_qp_rd_atom | ||
1702 | < SMBD_CM_RESPONDER_RESOURCES ? | ||
1703 | info->id->device->attrs.max_qp_rd_atom : | ||
1704 | SMBD_CM_RESPONDER_RESOURCES; | ||
1705 | info->responder_resources = conn_param.responder_resources; | ||
1706 | log_rdma_mr(INFO, "responder_resources=%d\n", | ||
1707 | info->responder_resources); | ||
1708 | |||
1709 | /* Need to send IRD/ORD in private data for iWARP */ | ||
1710 | info->id->device->get_port_immutable( | ||
1711 | info->id->device, info->id->port_num, &port_immutable); | ||
1712 | if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { | ||
1713 | ird_ord_hdr[0] = info->responder_resources; | ||
1714 | ird_ord_hdr[1] = 1; | ||
1715 | conn_param.private_data = ird_ord_hdr; | ||
1716 | conn_param.private_data_len = sizeof(ird_ord_hdr); | ||
1717 | } else { | ||
1718 | conn_param.private_data = NULL; | ||
1719 | conn_param.private_data_len = 0; | ||
1720 | } | ||
1721 | |||
1722 | conn_param.retry_count = SMBD_CM_RETRY; | ||
1723 | conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY; | ||
1724 | conn_param.flow_control = 0; | ||
1725 | init_waitqueue_head(&info->wait_destroy); | ||
1726 | |||
1727 | log_rdma_event(INFO, "connecting to IP %pI4 port %d\n", | ||
1728 | &addr_in->sin_addr, port); | ||
1729 | |||
1730 | init_waitqueue_head(&info->conn_wait); | ||
1731 | rc = rdma_connect(info->id, &conn_param); | ||
1732 | if (rc) { | ||
1733 | log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc); | ||
1734 | goto rdma_connect_failed; | ||
1735 | } | ||
1736 | |||
1737 | wait_event_interruptible( | ||
1738 | info->conn_wait, info->transport_status != SMBD_CONNECTING); | ||
1739 | |||
1740 | if (info->transport_status != SMBD_CONNECTED) { | ||
1741 | log_rdma_event(ERR, "rdma_connect failed port=%d\n", port); | ||
1742 | goto rdma_connect_failed; | ||
1743 | } | ||
1744 | |||
1745 | log_rdma_event(INFO, "rdma_connect connected\n"); | ||
1746 | |||
1747 | rc = allocate_caches_and_workqueue(info); | ||
1748 | if (rc) { | ||
1749 | log_rdma_event(ERR, "cache allocation failed\n"); | ||
1750 | goto allocate_cache_failed; | ||
1751 | } | ||
1752 | |||
1753 | init_waitqueue_head(&info->wait_send_queue); | ||
1754 | init_waitqueue_head(&info->wait_reassembly_queue); | ||
1755 | |||
1756 | INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); | ||
1757 | INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work); | ||
1758 | queue_delayed_work(info->workqueue, &info->idle_timer_work, | ||
1759 | info->keep_alive_interval*HZ); | ||
1760 | |||
1761 | init_waitqueue_head(&info->wait_smbd_send_pending); | ||
1762 | info->smbd_send_pending = 0; | ||
1763 | |||
1764 | init_waitqueue_head(&info->wait_smbd_recv_pending); | ||
1765 | info->smbd_recv_pending = 0; | ||
1766 | |||
1767 | init_waitqueue_head(&info->wait_send_pending); | ||
1768 | atomic_set(&info->send_pending, 0); | ||
1769 | |||
1770 | init_waitqueue_head(&info->wait_send_payload_pending); | ||
1771 | atomic_set(&info->send_payload_pending, 0); | ||
1772 | |||
1773 | INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); | ||
1774 | INIT_WORK(&info->destroy_work, smbd_destroy_rdma_work); | ||
1775 | INIT_WORK(&info->recv_done_work, smbd_recv_done_work); | ||
1776 | INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); | ||
1777 | info->new_credits_offered = 0; | ||
1778 | spin_lock_init(&info->lock_new_credits_offered); | ||
1779 | |||
1780 | rc = smbd_negotiate(info); | ||
1781 | if (rc) { | ||
1782 | log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc); | ||
1783 | goto negotiation_failed; | ||
1784 | } | ||
1785 | |||
1786 | rc = allocate_mr_list(info); | ||
1787 | if (rc) { | ||
1788 | log_rdma_mr(ERR, "memory registration allocation failed\n"); | ||
1789 | goto allocate_mr_failed; | ||
1790 | } | ||
1791 | |||
1792 | return info; | ||
1793 | |||
1794 | allocate_mr_failed: | ||
1795 | /* At this point, need to a full transport shutdown */ | ||
1796 | smbd_destroy(info); | ||
1797 | return NULL; | ||
1798 | |||
1799 | negotiation_failed: | ||
1800 | cancel_delayed_work_sync(&info->idle_timer_work); | ||
1801 | destroy_caches_and_workqueue(info); | ||
1802 | info->transport_status = SMBD_NEGOTIATE_FAILED; | ||
1803 | init_waitqueue_head(&info->conn_wait); | ||
1804 | rdma_disconnect(info->id); | ||
1805 | wait_event(info->conn_wait, | ||
1806 | info->transport_status == SMBD_DISCONNECTED); | ||
1807 | |||
1808 | allocate_cache_failed: | ||
1809 | rdma_connect_failed: | ||
1810 | rdma_destroy_qp(info->id); | ||
1811 | |||
1812 | create_qp_failed: | ||
1813 | alloc_cq_failed: | ||
1814 | if (info->send_cq) | ||
1815 | ib_free_cq(info->send_cq); | ||
1816 | if (info->recv_cq) | ||
1817 | ib_free_cq(info->recv_cq); | ||
1818 | |||
1819 | config_failed: | ||
1820 | ib_dealloc_pd(info->pd); | ||
1821 | rdma_destroy_id(info->id); | ||
1822 | |||
1823 | create_id_failed: | ||
1824 | kfree(info); | ||
1825 | return NULL; | ||
1826 | } | ||
1827 | |||
1828 | struct smbd_connection *smbd_get_connection( | ||
1829 | struct TCP_Server_Info *server, struct sockaddr *dstaddr) | ||
1830 | { | ||
1831 | struct smbd_connection *ret; | ||
1832 | int port = SMBD_PORT; | ||
1833 | |||
1834 | try_again: | ||
1835 | ret = _smbd_get_connection(server, dstaddr, port); | ||
1836 | |||
1837 | /* Try SMB_PORT if SMBD_PORT doesn't work */ | ||
1838 | if (!ret && port == SMBD_PORT) { | ||
1839 | port = SMB_PORT; | ||
1840 | goto try_again; | ||
1841 | } | ||
1842 | return ret; | ||
1843 | } | ||
1844 | |||
/*
 * Receive data from receive reassembly queue
 * All the incoming data packets are placed in reassembly queue
 * buf: the buffer to read data into
 * size: the length of data to read
 * return value: actual data read, or -ENODEV on disconnect/interrupt
 * Note: this implementation copies the data from reassembly queue to receive
 * buffers used by upper layer. This is not the optimal code path. A better way
 * to do it is to not have upper layer allocate its receive buffers but rather
 * borrow the buffer from reassembly queue, and return it after data is
 * consumed. But this will require more changes to upper layer code, and also
 * need to consider packet boundaries while they still being reassembled.
 */
static int smbd_recv_buf(struct smbd_connection *info, char *buf,
		unsigned int size)
{
	struct smbd_response *response;
	struct smbd_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;

again:
	if (info->transport_status != SMBD_CONNECTED) {
		log_read(ERR, "disconnected\n");
		return -ENODEV;
	}

	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time
	 */
	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
		info->reassembly_data_length);
	if (info->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * _get_first_reassembly. This call is lock free
		 * as we never read at the end of the queue which are being
		 * updated in SOFTIRQ as more data is received
		 */
		virt_rmb();
		queue_length = info->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		/* Resume mid-packet if a previous read stopped partway */
		offset = info->first_entry_offset;
		while (data_read < size) {
			response = _get_first_reassembly(info);
			data_transfer = smbd_response_payload(response);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(
					data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimize the
			 * change to upper layer packet processing logic. This
			 * will be eventually remove when an intermediate
			 * transport layer is added
			 */
			if (response->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				response->first_segment = false;
				log_read(INFO, "returning rfc1002 length %d\n",
					rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(
				buf + data_read,
				(char *)data_transfer + data_offset + offset,
				to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (!queue_length)
					spin_lock_irq(
						&info->reassembly_queue_lock);
				list_del(&response->list);
				queue_removed++;
				if (!queue_length)
					spin_unlock_irq(
						&info->reassembly_queue_lock);

				info->count_reassembly_queue--;
				info->count_dequeue_reassembly_queue++;
				put_receive_buffer(info, response);
				offset = 0;
				log_read(INFO, "put_receive_buffer offset=0\n");
			} else
				offset += to_copy;

			to_read -= to_copy;
			data_read += to_copy;

			log_read(INFO, "_get_first_reassembly memcpy %d bytes "
				"data_transfer_length-offset=%d after that "
				"to_read=%d data_read=%d offset=%d\n",
				to_copy, data_length - offset,
				to_read, data_read, offset);
		}

		/* Publish consumed bytes/entries back under the lock */
		spin_lock_irq(&info->reassembly_queue_lock);
		info->reassembly_data_length -= data_read;
		info->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&info->reassembly_queue_lock);

		info->first_entry_offset = offset;
		log_read(INFO, "returning to thread data_read=%d "
			"reassembly_data_length=%d first_entry_offset=%d\n",
			data_read, info->reassembly_data_length,
			info->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	/* Not enough data yet: sleep until more arrives or disconnect */
	log_read(INFO, "wait_event on more data\n");
	rc = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= size ||
			info->transport_status != SMBD_CONNECTED);
	/* Don't return any data if interrupted */
	if (rc)
		return -ENODEV;

	goto again;
}
1989 | |||
/*
 * Receive a page from receive reassembly queue
 * page: the page to read data into
 * to_read: the length of data to read
 * return value: actual data read, or 0 if interrupted while waiting
 */
static int smbd_recv_page(struct smbd_connection *info,
		struct page *page, unsigned int to_read)
{
	int ret;
	char *to_address;

	/* make sure we have the page ready for read */
	ret = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= to_read ||
			info->transport_status != SMBD_CONNECTED);
	if (ret)
		return 0;

	/*
	 * now we can read from reassembly queue and not sleep
	 * NOTE(review): kmap_atomic enters atomic context; this relies on
	 * smbd_recv_buf taking only its non-sleeping path because enough
	 * data was waited for above — confirm no path in smbd_recv_buf can
	 * sleep once reassembly_data_length >= to_read.
	 */
	to_address = kmap_atomic(page);

	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
		page, to_address, to_read);

	ret = smbd_recv_buf(info, to_address, to_read);
	kunmap_atomic(to_address);

	return ret;
}
2021 | |||
/*
 * Receive data from transport
 * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC
 * return: total bytes read, or 0. SMB Direct will not do partial read.
 */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
	char *buf;
	struct page *page;
	unsigned int to_read;
	int rc;

	/*
	 * NOTE(review): smbd_recv_pending is a plain counter incremented
	 * without a lock — presumably only one receiver runs at a time;
	 * confirm against the demultiplex thread.
	 */
	info->smbd_recv_pending++;

	switch (msg->msg_iter.type) {
	case READ | ITER_KVEC:
		/* Only the first kvec of the iterator is consumed here */
		buf = msg->msg_iter.kvec->iov_base;
		to_read = msg->msg_iter.kvec->iov_len;
		rc = smbd_recv_buf(info, buf, to_read);
		break;

	case READ | ITER_BVEC:
		/* Only the first bvec of the iterator is consumed here */
		page = msg->msg_iter.bvec->bv_page;
		to_read = msg->msg_iter.bvec->bv_len;
		rc = smbd_recv_page(info, page, to_read);
		break;

	default:
		/* It's a bug in upper layer to get there */
		cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
			msg->msg_iter.type);
		rc = -EIO;
	}

	info->smbd_recv_pending--;
	wake_up(&info->wait_smbd_recv_pending);

	/* SMBDirect will read it all or nothing */
	if (rc > 0)
		msg->msg_iter.count = 0;
	return rc;
}
2064 | |||
/*
 * Send data to transport
 * Each rqst is transported as a SMBDirect payload
 * rqst: the data to write
 * return value: 0 if successfully write, otherwise error code
 *
 * The rqst (kvec array plus optional page array) is carved into one or
 * more SMBDirect data transfer packets, each carrying at most
 * max_iov_size bytes of payload.  remaining_data_length tells the peer
 * how many payload bytes follow after each packet, per [MS-SMBD].
 * On return all posted sends have completed (see the wait at "done:").
 */
int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
{
	struct kvec vec;
	int nvecs;
	int size;
	int buflen = 0, remaining_data_length;
	int start, i, j;
	/* usable payload per packet = send buffer minus packet header */
	int max_iov_size =
		info->max_send_size - sizeof(struct smbd_data_transfer);
	struct kvec iov[SMBDIRECT_MAX_SGE];
	int rc;

	/* NOTE(review): plain int increment — presumably serialized by the
	 * upper layer; confirm no concurrent smbd_send on one connection */
	info->smbd_send_pending++;
	if (info->transport_status != SMBD_CONNECTED) {
		rc = -ENODEV;
		goto done;
	}

	/*
	 * This usually means a configuration error
	 * We use RDMA read/write for packet size > rdma_readwrite_threshold
	 * as long as it's properly configured we should never get into this
	 * situation
	 */
	if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
		log_write(ERR, "maximum send segment %x exceeding %x\n",
			 rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
		rc = -EINVAL;
		goto done;
	}

	/*
	 * Remove the RFC1002 length defined in MS-SMB2 section 2.1
	 * It is used only for TCP transport
	 * In future we may want to add a transport layer under protocol
	 * layer so this will only be issued to TCP transport
	 */
	iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
	iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
	buflen += iov[0].iov_len;

	/* total up iov array first */
	for (i = 1; i < rqst->rq_nvec; i++) {
		iov[i].iov_base = rqst->rq_iov[i].iov_base;
		iov[i].iov_len = rqst->rq_iov[i].iov_len;
		buflen += iov[i].iov_len;
	}

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		/* all pages are full-size except possibly the tail one */
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	if (buflen + sizeof(struct smbd_data_transfer) >
		info->max_fragmented_send_size) {
		log_write(ERR, "payload size %d > max size %d\n",
			buflen, info->max_fragmented_send_size);
		rc = -EINVAL;
		goto done;
	}

	/* total payload still to be sent, advertised in each packet header */
	remaining_data_length = buflen;

	log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
		"rq_tailsz=%d buflen=%d\n",
		rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
		rqst->rq_tailsz, buflen);

	/*
	 * Walk the kvec array accumulating entries into [start, i) runs of
	 * at most max_iov_size bytes; an oversized single kvec is sliced
	 * into max_iov_size chunks.  buflen is reused as the running size
	 * of the current run.  Skip iov[0] entirely if it became empty
	 * after stripping the RFC1002 header.
	 */
	start = i = iov[0].iov_len ? 0 : 1;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				/* flush the run accumulated before iov[i] */
				remaining_data_length -=
					(buflen-iov[i].iov_len);
				log_write(INFO, "sending iov[] from start=%d "
					"i=%d nvecs=%d "
					"remaining_data_length=%d\n",
					start, i, i-start,
					remaining_data_length);
				rc = smbd_post_send_data(
					info, &iov[start], i-start,
					remaining_data_length);
				if (rc)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				nvecs = (buflen+max_iov_size-1)/max_iov_size;
				log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
					" break to %d vectors\n",
					start, iov[start].iov_base,
					buflen, nvecs);
				for (j = 0; j < nvecs; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j*max_iov_size;
					vec.iov_len = max_iov_size;
					/* last chunk carries the remainder */
					if (j == nvecs-1)
						vec.iov_len =
							buflen -
							max_iov_size*(nvecs-1);
					remaining_data_length -= vec.iov_len;
					log_write(INFO,
						"sending vec j=%d iov_base=%p"
						" iov_len=%zu "
						"remaining_data_length=%d\n",
						j, vec.iov_base, vec.iov_len,
						remaining_data_length);
					rc = smbd_post_send_data(
						info, &vec, 1,
						remaining_data_length);
					if (rc)
						goto done;
				}
				i++;
			}
			/* begin a fresh run at the current entry */
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == rqst->rq_nvec) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				log_write(INFO,
					"sending iov[] from start=%d i=%d "
					"nvecs=%d remaining_data_length=%d\n",
					start, i, i-start,
					remaining_data_length);
				rc = smbd_post_send_data(info, &iov[start],
					i-start, remaining_data_length);
				if (rc)
					goto done;
				break;
			}
		}
		log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
	}

	/* now sending pages if there are any */
	for (i = 0; i < rqst->rq_npages; i++) {
		/* buflen here is the number of bytes in this page */
		buflen = (i == rqst->rq_npages-1) ?
			rqst->rq_tailsz : rqst->rq_pagesz;
		nvecs = (buflen + max_iov_size - 1) / max_iov_size;
		log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
			buflen, nvecs);
		for (j = 0; j < nvecs; j++) {
			size = max_iov_size;
			if (j == nvecs-1)
				size = buflen - j*max_iov_size;
			remaining_data_length -= size;
			log_write(INFO, "sending pages i=%d offset=%d size=%d"
				" remaining_data_length=%d\n",
				i, j*max_iov_size, size, remaining_data_length);
			rc = smbd_post_send_page(
				info, rqst->rq_pages[i], j*max_iov_size,
				size, remaining_data_length);
			if (rc)
				goto done;
		}
	}

done:
	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one.
	 * Send them all and wait for pending send count to get to 0
	 * that means all the I/Os have been out and we are good to return
	 */

	wait_event(info->wait_send_payload_pending,
		atomic_read(&info->send_payload_pending) == 0);

	info->smbd_send_pending--;
	wake_up(&info->wait_smbd_send_pending);

	return rc;
}
2250 | |||
2251 | static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) | ||
2252 | { | ||
2253 | struct smbd_mr *mr; | ||
2254 | struct ib_cqe *cqe; | ||
2255 | |||
2256 | if (wc->status) { | ||
2257 | log_rdma_mr(ERR, "status=%d\n", wc->status); | ||
2258 | cqe = wc->wr_cqe; | ||
2259 | mr = container_of(cqe, struct smbd_mr, cqe); | ||
2260 | smbd_disconnect_rdma_connection(mr->conn); | ||
2261 | } | ||
2262 | } | ||
2263 | |||
2264 | /* | ||
2265 | * The work queue function that recovers MRs | ||
2266 | * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used | ||
2267 | * again. Both calls are slow, so finish them in a workqueue. This will not | ||
2268 | * block I/O path. | ||
2269 | * There is one workqueue that recovers MRs, there is no need to lock as the | ||
2270 | * I/O requests calling smbd_register_mr will never update the links in the | ||
2271 | * mr_list. | ||
2272 | */ | ||
2273 | static void smbd_mr_recovery_work(struct work_struct *work) | ||
2274 | { | ||
2275 | struct smbd_connection *info = | ||
2276 | container_of(work, struct smbd_connection, mr_recovery_work); | ||
2277 | struct smbd_mr *smbdirect_mr; | ||
2278 | int rc; | ||
2279 | |||
2280 | list_for_each_entry(smbdirect_mr, &info->mr_list, list) { | ||
2281 | if (smbdirect_mr->state == MR_INVALIDATED || | ||
2282 | smbdirect_mr->state == MR_ERROR) { | ||
2283 | |||
2284 | if (smbdirect_mr->state == MR_INVALIDATED) { | ||
2285 | ib_dma_unmap_sg( | ||
2286 | info->id->device, smbdirect_mr->sgl, | ||
2287 | smbdirect_mr->sgl_count, | ||
2288 | smbdirect_mr->dir); | ||
2289 | smbdirect_mr->state = MR_READY; | ||
2290 | } else if (smbdirect_mr->state == MR_ERROR) { | ||
2291 | |||
2292 | /* recover this MR entry */ | ||
2293 | rc = ib_dereg_mr(smbdirect_mr->mr); | ||
2294 | if (rc) { | ||
2295 | log_rdma_mr(ERR, | ||
2296 | "ib_dereg_mr faield rc=%x\n", | ||
2297 | rc); | ||
2298 | smbd_disconnect_rdma_connection(info); | ||
2299 | } | ||
2300 | |||
2301 | smbdirect_mr->mr = ib_alloc_mr( | ||
2302 | info->pd, info->mr_type, | ||
2303 | info->max_frmr_depth); | ||
2304 | if (IS_ERR(smbdirect_mr->mr)) { | ||
2305 | log_rdma_mr(ERR, | ||
2306 | "ib_alloc_mr failed mr_type=%x " | ||
2307 | "max_frmr_depth=%x\n", | ||
2308 | info->mr_type, | ||
2309 | info->max_frmr_depth); | ||
2310 | smbd_disconnect_rdma_connection(info); | ||
2311 | } | ||
2312 | |||
2313 | smbdirect_mr->state = MR_READY; | ||
2314 | } | ||
2315 | /* smbdirect_mr->state is updated by this function | ||
2316 | * and is read and updated by I/O issuing CPUs trying | ||
2317 | * to get a MR, the call to atomic_inc_return | ||
2318 | * implicates a memory barrier and guarantees this | ||
2319 | * value is updated before waking up any calls to | ||
2320 | * get_mr() from the I/O issuing CPUs | ||
2321 | */ | ||
2322 | if (atomic_inc_return(&info->mr_ready_count) == 1) | ||
2323 | wake_up_interruptible(&info->wait_mr); | ||
2324 | } | ||
2325 | } | ||
2326 | } | ||
2327 | |||
/*
 * Tear down the MR pool built by allocate_mr_list().
 * The recovery work is cancelled first so nothing mutates entries while
 * we free them; INVALIDATED entries still hold a live DMA mapping that
 * must be unmapped before the MR and its scatterlist are released.
 */
static void destroy_mr_list(struct smbd_connection *info)
{
	struct smbd_mr *mr, *tmp;

	cancel_work_sync(&info->mr_recovery_work);
	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
		if (mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(info->id->device, mr->sgl,
				mr->sgl_count, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgl);
		kfree(mr);
	}
}
2342 | |||
/*
 * Allocate MRs used for RDMA read/write
 * The number of MRs will not exceed hardware capability in responder_resources
 * All MRs are kept in mr_list. The MR can be recovered after it's used
 * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
 * as MRs are used and recovered for I/O, but the list links will not change
 *
 * Returns 0 on success, -ENOMEM on any allocation/registration failure
 * (all fully-constructed entries are unwound on the error path).
 */
static int allocate_mr_list(struct smbd_connection *info)
{
	int i;
	struct smbd_mr *smbdirect_mr, *tmp;

	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
	init_waitqueue_head(&info->wait_for_mr_cleanup);
	/* Allocate more MRs (2x) than hardware responder_resources */
	for (i = 0; i < info->responder_resources * 2; i++) {
		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
		if (!smbdirect_mr)
			goto out;
		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
			info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
				"max_frmr_depth=%x\n",
				info->mr_type, info->max_frmr_depth);
			goto out;
		}
		smbdirect_mr->sgl = kcalloc(
			info->max_frmr_depth,
			sizeof(struct scatterlist),
			GFP_KERNEL);
		if (!smbdirect_mr->sgl) {
			log_rdma_mr(ERR, "failed to allocate sgl\n");
			ib_dereg_mr(smbdirect_mr->mr);
			goto out;
		}
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;

		/* only fully-built entries reach the list */
		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
	}
	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
	return 0;

out:
	/* free the partially-constructed entry (kfree(NULL) is a no-op) */
	kfree(smbdirect_mr);

	/* then unwind every entry already on the list */
	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
		ib_dereg_mr(smbdirect_mr->mr);
		kfree(smbdirect_mr->sgl);
		kfree(smbdirect_mr);
	}
	return -ENOMEM;
}
2402 | |||
2403 | /* | ||
2404 | * Get a MR from mr_list. This function waits until there is at least one | ||
2405 | * MR available in the list. It may access the list while the | ||
2406 | * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock | ||
2407 | * as they never modify the same places. However, there may be several CPUs | ||
2408 | * issueing I/O trying to get MR at the same time, mr_list_lock is used to | ||
2409 | * protect this situation. | ||
2410 | */ | ||
2411 | static struct smbd_mr *get_mr(struct smbd_connection *info) | ||
2412 | { | ||
2413 | struct smbd_mr *ret; | ||
2414 | int rc; | ||
2415 | again: | ||
2416 | rc = wait_event_interruptible(info->wait_mr, | ||
2417 | atomic_read(&info->mr_ready_count) || | ||
2418 | info->transport_status != SMBD_CONNECTED); | ||
2419 | if (rc) { | ||
2420 | log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc); | ||
2421 | return NULL; | ||
2422 | } | ||
2423 | |||
2424 | if (info->transport_status != SMBD_CONNECTED) { | ||
2425 | log_rdma_mr(ERR, "info->transport_status=%x\n", | ||
2426 | info->transport_status); | ||
2427 | return NULL; | ||
2428 | } | ||
2429 | |||
2430 | spin_lock(&info->mr_list_lock); | ||
2431 | list_for_each_entry(ret, &info->mr_list, list) { | ||
2432 | if (ret->state == MR_READY) { | ||
2433 | ret->state = MR_REGISTERED; | ||
2434 | spin_unlock(&info->mr_list_lock); | ||
2435 | atomic_dec(&info->mr_ready_count); | ||
2436 | atomic_inc(&info->mr_used_count); | ||
2437 | return ret; | ||
2438 | } | ||
2439 | } | ||
2440 | |||
2441 | spin_unlock(&info->mr_list_lock); | ||
2442 | /* | ||
2443 | * It is possible that we could fail to get MR because other processes may | ||
2444 | * try to acquire a MR at the same time. If this is the case, retry it. | ||
2445 | */ | ||
2446 | goto again; | ||
2447 | } | ||
2448 | |||
/*
 * Register memory for RDMA read/write
 * pages[]: the list of pages to register memory with
 * num_pages: the number of pages to register
 * tailsz: if non-zero, the bytes to register in the last page
 * writing: true if this is a RDMA write (SMB read), false for RDMA read
 * need_invalidate: true if this MR needs to be locally invalidated after I/O
 * return value: the MR registered, NULL if failed.
 *
 * On failure the MR is put into MR_ERROR state so smbd_mr_recovery_work
 * can rebuild it, and mr_used_count is dropped back.
 */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int tailsz, bool writing, bool need_invalidate)
{
	struct smbd_mr *smbdirect_mr;
	int rc, i;
	enum dma_data_direction dir;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;

	/* the preallocated sgl arrays hold at most max_frmr_depth entries */
	if (num_pages > info->max_frmr_depth) {
		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
			num_pages, info->max_frmr_depth);
		return NULL;
	}

	/* may block until an MR becomes available */
	smbdirect_mr = get_mr(info);
	if (!smbdirect_mr) {
		log_rdma_mr(ERR, "get_mr returning NULL\n");
		return NULL;
	}
	smbdirect_mr->need_invalidate = need_invalidate;
	smbdirect_mr->sgl_count = num_pages;
	sg_init_table(smbdirect_mr->sgl, num_pages);

	/* full pages, then the tail page with its partial length if given */
	for (i = 0; i < num_pages - 1; i++)
		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);

	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
		tailsz ? tailsz : PAGE_SIZE, 0);

	/* RDMA write (peer writes to us) means the device fills our pages */
	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	smbdirect_mr->dir = dir;
	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
	if (!rc) {
		/* NOTE(review): error path logged at INFO level — consider ERR */
		log_rdma_mr(INFO, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
			num_pages, dir, rc);
		goto dma_map_error;
	}

	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
		NULL, PAGE_SIZE);
	if (rc != num_pages) {
		log_rdma_mr(INFO,
			"ib_map_mr_sg failed rc = %x num_pages = %x\n",
			rc, num_pages);
		goto map_mr_error;
	}

	/* fresh rkey per registration so stale remote access is rejected */
	ib_update_fast_reg_key(smbdirect_mr->mr,
		ib_inc_rkey(smbdirect_mr->mr->rkey));
	reg_wr = &smbdirect_mr->wr;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	smbdirect_mr->cqe.done = register_mr_done;
	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
	reg_wr->mr = smbdirect_mr->mr;
	reg_wr->key = smbdirect_mr->mr->rkey;
	reg_wr->access = writing ?
		IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
		IB_ACCESS_REMOTE_READ;

	/*
	 * There is no need for waiting for completion on ib_post_send
	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
	 * on the next ib_post_send when we actually send I/O to remote peer
	 */
	rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
	if (!rc)
		return smbdirect_mr;

	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
		rc, reg_wr->key);

	/* If all failed, attempt to recover this MR by setting it MR_ERROR*/
map_mr_error:
	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
		smbdirect_mr->sgl_count, smbdirect_mr->dir);

dma_map_error:
	smbdirect_mr->state = MR_ERROR;
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	return NULL;
}
2545 | |||
2546 | static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) | ||
2547 | { | ||
2548 | struct smbd_mr *smbdirect_mr; | ||
2549 | struct ib_cqe *cqe; | ||
2550 | |||
2551 | cqe = wc->wr_cqe; | ||
2552 | smbdirect_mr = container_of(cqe, struct smbd_mr, cqe); | ||
2553 | smbdirect_mr->state = MR_INVALIDATED; | ||
2554 | if (wc->status != IB_WC_SUCCESS) { | ||
2555 | log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status); | ||
2556 | smbdirect_mr->state = MR_ERROR; | ||
2557 | } | ||
2558 | complete(&smbdirect_mr->invalidate_done); | ||
2559 | } | ||
2560 | |||
/*
 * Deregister a MR after I/O is done
 * This function may wait if remote invalidation is not used
 * and we have to locally invalidate the buffer to prevent data is being
 * modified by remote peer after upper layer consumes it
 *
 * Returns 0 on success or the ib_post_send error; in either case the
 * MR is handed back to the pool accounting (mr_used_count).
 */
int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
	struct ib_send_wr *wr, *bad_wr;
	struct smbd_connection *info = smbdirect_mr->conn;
	int rc = 0;

	if (smbdirect_mr->need_invalidate) {
		/* Need to finish local invalidation before returning */
		wr = &smbdirect_mr->inv_wr;
		wr->opcode = IB_WR_LOCAL_INV;
		smbdirect_mr->cqe.done = local_inv_done;
		wr->wr_cqe = &smbdirect_mr->cqe;
		wr->num_sge = 0;
		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
		wr->send_flags = IB_SEND_SIGNALED;

		init_completion(&smbdirect_mr->invalidate_done);
		rc = ib_post_send(info->id->qp, wr, &bad_wr);
		if (rc) {
			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
			smbd_disconnect_rdma_connection(info);
			goto done;
		}
		/* local_inv_done() sets the final MR state before completing */
		wait_for_completion(&smbdirect_mr->invalidate_done);
		smbdirect_mr->need_invalidate = false;
	} else
		/*
		 * For remote invalidation, just set it to MR_INVALIDATED
		 * and defer to mr_recovery_work to recover the MR for next use
		 */
		smbdirect_mr->state = MR_INVALIDATED;

	/*
	 * Schedule the work to do MR recovery for future I/Os
	 * MR recovery is slow and we don't want it to block the current I/O
	 */
	queue_work(info->workqueue, &info->mr_recovery_work);

done:
	/* last in-flight MR wakes any thread waiting for full cleanup */
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	return rc;
}
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h new file mode 100644 index 000000000000..f9038daea194 --- /dev/null +++ b/fs/cifs/smbdirect.h | |||
@@ -0,0 +1,338 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017, Microsoft Corporation. | ||
3 | * | ||
4 | * Author(s): Long Li <longli@microsoft.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
14 | * the GNU General Public License for more details. | ||
15 | */ | ||
16 | #ifndef _SMBDIRECT_H | ||
17 | #define _SMBDIRECT_H | ||
18 | |||
19 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
20 | #define cifs_rdma_enabled(server) ((server)->rdma) | ||
21 | |||
22 | #include "cifsglob.h" | ||
23 | #include <rdma/ib_verbs.h> | ||
24 | #include <rdma/rdma_cm.h> | ||
25 | #include <linux/mempool.h> | ||
26 | |||
27 | extern int rdma_readwrite_threshold; | ||
28 | extern int smbd_max_frmr_depth; | ||
29 | extern int smbd_keep_alive_interval; | ||
30 | extern int smbd_max_receive_size; | ||
31 | extern int smbd_max_fragmented_recv_size; | ||
32 | extern int smbd_max_send_size; | ||
33 | extern int smbd_send_credit_target; | ||
34 | extern int smbd_receive_credit_max; | ||
35 | |||
/* State of the SMBDirect keep-alive exchange on a connection */
enum keep_alive_status {
	KEEP_ALIVE_NONE,
	KEEP_ALIVE_PENDING,
	KEEP_ALIVE_SENT,
};

/* Lifecycle states of a SMBDirect connection, from creation to teardown */
enum smbd_connection_status {
	SMBD_CREATED,
	SMBD_CONNECTING,
	SMBD_CONNECTED,
	SMBD_NEGOTIATE_FAILED,
	SMBD_DISCONNECTING,
	SMBD_DISCONNECTED,
	SMBD_DESTROYED
};
51 | |||
/*
 * The context for the SMBDirect transport
 * Everything related to the transport is here. It has several logical parts
 * 1. RDMA related structures
 * 2. SMBDirect connection parameters
 * 3. Memory registrations
 * 4. Receive and reassembly queues for data receive path
 * 5. mempools for allocating packets
 */
struct smbd_connection {
	enum smbd_connection_status transport_status;

	/* RDMA related */
	struct rdma_cm_id *id;
	struct ib_qp_init_attr qp_attr;
	struct ib_pd *pd;
	struct ib_cq *send_cq, *recv_cq;
	struct ib_device_attr dev_attr;
	int ri_rc;
	struct completion ri_done;
	wait_queue_head_t conn_wait;
	wait_queue_head_t wait_destroy;

	struct completion negotiate_completion;
	bool negotiate_done;

	struct work_struct destroy_work;
	struct work_struct disconnect_work;
	struct work_struct recv_done_work;
	struct work_struct post_send_credits_work;

	spinlock_t lock_new_credits_offered;
	int new_credits_offered;

	/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
	int receive_credit_max;
	int send_credit_target;
	int max_send_size;
	int max_fragmented_recv_size;
	int max_fragmented_send_size;
	int max_receive_size;
	int keep_alive_interval;
	int max_readwrite_size;
	enum keep_alive_status keep_alive_requested;
	int protocol;
	atomic_t send_credits;
	atomic_t receive_credits;
	int receive_credit_target;
	int fragment_reassembly_remaining;

	/* Memory registrations */
	/* Maximum number of RDMA read/write outstanding on this connection */
	int responder_resources;
	/* Maximum number of SGEs in a RDMA write/read */
	int max_frmr_depth;
	/*
	 * If payload is less than or equal to the threshold,
	 * use RDMA send/recv to send upper layer I/O.
	 * If payload is more than the threshold,
	 * use RDMA read/write through memory registration for I/O.
	 */
	int rdma_readwrite_threshold;
	enum ib_mr_type mr_type;
	struct list_head mr_list;
	spinlock_t mr_list_lock;
	/* The number of available MRs ready for memory registration */
	atomic_t mr_ready_count;
	atomic_t mr_used_count;
	wait_queue_head_t wait_mr;
	struct work_struct mr_recovery_work;
	/* Used by transport to wait until all MRs are returned */
	wait_queue_head_t wait_for_mr_cleanup;

	/* Activity accounting */
	/* Pending requests issued from upper layer */
	int smbd_send_pending;
	wait_queue_head_t wait_smbd_send_pending;

	int smbd_recv_pending;
	wait_queue_head_t wait_smbd_recv_pending;

	atomic_t send_pending;
	wait_queue_head_t wait_send_pending;
	atomic_t send_payload_pending;
	wait_queue_head_t wait_send_payload_pending;

	/* Receive queue */
	struct list_head receive_queue;
	int count_receive_queue;
	spinlock_t receive_queue_lock;

	struct list_head empty_packet_queue;
	int count_empty_packet_queue;
	spinlock_t empty_packet_queue_lock;

	wait_queue_head_t wait_receive_queues;

	/* Reassembly queue */
	struct list_head reassembly_queue;
	spinlock_t reassembly_queue_lock;
	wait_queue_head_t wait_reassembly_queue;

	/* total data length of reassembly queue */
	int reassembly_data_length;
	int reassembly_queue_length;
	/* the offset to first buffer in reassembly queue */
	int first_entry_offset;

	bool send_immediate;

	wait_queue_head_t wait_send_queue;

	/*
	 * Indicate if we have received a full packet on the connection
	 * This is used to identify the first SMBD packet of a assembled
	 * payload (SMB packet) in reassembly queue so we can return a
	 * RFC1002 length to upper layer to indicate the length of the SMB
	 * packet received
	 */
	bool full_packet_received;

	struct workqueue_struct *workqueue;
	struct delayed_work idle_timer_work;
	struct delayed_work send_immediate_work;

	/* Memory pool for preallocating buffers */
	/* request pool for RDMA send */
	struct kmem_cache *request_cache;
	mempool_t *request_mempool;

	/* response pool for RDMA receive */
	struct kmem_cache *response_cache;
	mempool_t *response_mempool;

	/* for debug purposes */
	unsigned int count_get_receive_buffer;
	unsigned int count_put_receive_buffer;
	unsigned int count_reassembly_queue;
	unsigned int count_enqueue_reassembly_queue;
	unsigned int count_dequeue_reassembly_queue;
	unsigned int count_send_empty;
};
194 | |||
/* The two kinds of packets a SMBDirect peer can send us */
enum smbd_message_type {
	SMBD_NEGOTIATE_RESP,
	SMBD_TRANSFER_DATA,
};

/* Flag in smbd_data_transfer.flags: peer asks for an immediate response */
#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
	__le16 min_version;
	__le16 max_version;
	__le16 reserved;
	__le16 credits_requested;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
	__le16 min_version;
	__le16 max_version;
	__le16 negotiated_version;
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	/* payload bytes follow the fixed header */
	__u8 buffer[];
} __packed;

/* The packet fields for a registered RDMA buffer */
struct smbd_buffer_descriptor_v1 {
	__le64 offset;
	__le32 token;
	__le32 length;
} __packed;
247 | |||
/* Default maximum number of SGEs in a RDMA send/recv */
#define SMBDIRECT_MAX_SGE	16
/* The context for a SMBD request */
struct smbd_request {
	struct smbd_connection *info;
	/* completion callback context for the send work request */
	struct ib_cqe cqe;

	/* true if this request carries upper layer payload */
	bool has_payload;

	/* the SGE entries for this packet */
	struct ib_sge sge[SMBDIRECT_MAX_SGE];
	int num_sge;

	/* SMBD packet header follows this structure */
	u8 packet[];
};

/* The context for a SMBD response */
struct smbd_response {
	struct smbd_connection *info;
	/* completion callback context for the receive work request */
	struct ib_cqe cqe;
	struct ib_sge sge;

	enum smbd_message_type type;

	/* Link to receive queue or reassembly queue */
	struct list_head list;

	/* Indicate if this is the 1st packet of a payload */
	bool first_segment;

	/* SMBD packet header and payload follows this structure */
	u8 packet[];
};
283 | |||
/* Create a SMBDirect session */
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
void smbd_destroy(struct smbd_connection *info);

/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
296 | |||
/*
 * Lifecycle of a memory registration entry:
 * READY -> REGISTERED (get_mr) -> INVALIDATED or ERROR (I/O completion)
 * -> READY again via smbd_mr_recovery_work.
 */
enum mr_state {
	MR_READY,
	MR_REGISTERED,
	MR_INVALIDATED,
	MR_ERROR
};

/* One entry of the preallocated MR pool (see allocate_mr_list) */
struct smbd_mr {
	struct smbd_connection	*conn;
	struct list_head	list;
	enum mr_state		state;
	struct ib_mr		*mr;
	struct scatterlist	*sgl;
	int			sgl_count;
	enum dma_data_direction	dir;
	/* reg and invalidate work requests are never in flight together */
	union {
		struct ib_reg_wr	wr;
		struct ib_send_wr	inv_wr;
	};
	struct ib_cqe		cqe;
	bool			need_invalidate;
	struct completion	invalidate_done;
};

/* Interfaces to register and deregister MR for RDMA read/write */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);
326 | |||
#else
/* SMB Direct is compiled out: never enabled, all entry points fail/no-op */
#define cifs_rdma_enabled(server)	0
struct smbd_connection {};
static inline void *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct smbd_connection *info) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
337 | |||
338 | #endif | ||
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 7efbab013957..9779b3292d8e 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -37,6 +37,10 @@ | |||
37 | #include "cifsglob.h" | 37 | #include "cifsglob.h" |
38 | #include "cifsproto.h" | 38 | #include "cifsproto.h" |
39 | #include "cifs_debug.h" | 39 | #include "cifs_debug.h" |
40 | #include "smbdirect.h" | ||
41 | |||
42 | /* Max number of iovectors we can use off the stack when sending requests. */ | ||
43 | #define CIFS_MAX_IOV_SIZE 8 | ||
40 | 44 | ||
41 | void | 45 | void |
42 | cifs_wake_up_task(struct mid_q_entry *mid) | 46 | cifs_wake_up_task(struct mid_q_entry *mid) |
@@ -229,7 +233,10 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
229 | struct socket *ssocket = server->ssocket; | 233 | struct socket *ssocket = server->ssocket; |
230 | struct msghdr smb_msg; | 234 | struct msghdr smb_msg; |
231 | int val = 1; | 235 | int val = 1; |
232 | 236 | if (cifs_rdma_enabled(server) && server->smbd_conn) { | |
237 | rc = smbd_send(server->smbd_conn, rqst); | ||
238 | goto smbd_done; | ||
239 | } | ||
233 | if (ssocket == NULL) | 240 | if (ssocket == NULL) |
234 | return -ENOTSOCK; | 241 | return -ENOTSOCK; |
235 | 242 | ||
@@ -298,7 +305,7 @@ uncork: | |||
298 | */ | 305 | */ |
299 | server->tcpStatus = CifsNeedReconnect; | 306 | server->tcpStatus = CifsNeedReconnect; |
300 | } | 307 | } |
301 | 308 | smbd_done: | |
302 | if (rc < 0 && rc != -EINTR) | 309 | if (rc < 0 && rc != -EINTR) |
303 | cifs_dbg(VFS, "Error %d sending data on socket to server\n", | 310 | cifs_dbg(VFS, "Error %d sending data on socket to server\n", |
304 | rc); | 311 | rc); |
@@ -803,12 +810,16 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
803 | const int flags, struct kvec *resp_iov) | 810 | const int flags, struct kvec *resp_iov) |
804 | { | 811 | { |
805 | struct smb_rqst rqst; | 812 | struct smb_rqst rqst; |
806 | struct kvec *new_iov; | 813 | struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov; |
807 | int rc; | 814 | int rc; |
808 | 815 | ||
809 | new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL); | 816 | if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { |
810 | if (!new_iov) | 817 | new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), |
811 | return -ENOMEM; | 818 | GFP_KERNEL); |
819 | if (!new_iov) | ||
820 | return -ENOMEM; | ||
821 | } else | ||
822 | new_iov = s_iov; | ||
812 | 823 | ||
813 | /* 1st iov is a RFC1001 length followed by the rest of the packet */ | 824 | /* 1st iov is a RFC1001 length followed by the rest of the packet */ |
814 | memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); | 825 | memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); |
@@ -823,7 +834,51 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
823 | rqst.rq_nvec = n_vec + 1; | 834 | rqst.rq_nvec = n_vec + 1; |
824 | 835 | ||
825 | rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov); | 836 | rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov); |
826 | kfree(new_iov); | 837 | if (n_vec + 1 > CIFS_MAX_IOV_SIZE) |
838 | kfree(new_iov); | ||
839 | return rc; | ||
840 | } | ||
841 | |||
842 | /* Like SendReceive2 but iov[0] does not contain an rfc1002 header */ | ||
843 | int | ||
844 | smb2_send_recv(const unsigned int xid, struct cifs_ses *ses, | ||
845 | struct kvec *iov, int n_vec, int *resp_buf_type /* ret */, | ||
846 | const int flags, struct kvec *resp_iov) | ||
847 | { | ||
848 | struct smb_rqst rqst; | ||
849 | struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov; | ||
850 | int rc; | ||
851 | int i; | ||
852 | __u32 count; | ||
853 | __be32 rfc1002_marker; | ||
854 | |||
855 | if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { | ||
856 | new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), | ||
857 | GFP_KERNEL); | ||
858 | if (!new_iov) | ||
859 | return -ENOMEM; | ||
860 | } else | ||
861 | new_iov = s_iov; | ||
862 | |||
863 | /* 1st iov is an RFC1002 Session Message length */ | ||
864 | memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); | ||
865 | |||
866 | count = 0; | ||
867 | for (i = 1; i < n_vec + 1; i++) | ||
868 | count += new_iov[i].iov_len; | ||
869 | |||
870 | rfc1002_marker = cpu_to_be32(count); | ||
871 | |||
872 | new_iov[0].iov_base = &rfc1002_marker; | ||
873 | new_iov[0].iov_len = 4; | ||
874 | |||
875 | memset(&rqst, 0, sizeof(struct smb_rqst)); | ||
876 | rqst.rq_iov = new_iov; | ||
877 | rqst.rq_nvec = n_vec + 1; | ||
878 | |||
879 | rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov); | ||
880 | if (n_vec + 1 > CIFS_MAX_IOV_SIZE) | ||
881 | kfree(new_iov); | ||
827 | return rc; | 882 | return rc; |
828 | } | 883 | } |
829 | 884 | ||