author     Philipp Reisner <philipp.reisner@linbit.com>   2011-01-19 10:58:16 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>   2011-08-29 05:30:26 -0400
commit     c012949a4084a9f91654121d28f199ef408cb9d7 (patch)
tree       55a8a57d1b68749c55df2f1a9c222f3f5d2c9851 /drivers/block
parent     c6d25cfe52a32232e4de0bbe6ddf8219f054f55c (diff)
drbd: Replaced all p_header80 with a generic p_header
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--   drivers/block/drbd/drbd_int.h      | 64
-rw-r--r--   drivers/block/drbd/drbd_main.c     | 54
-rw-r--r--   drivers/block/drbd/drbd_receiver.c | 16
-rw-r--r--   drivers/block/drbd/drbd_worker.c   |  2
4 files changed, 64 insertions(+), 72 deletions(-)
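
For reference, here is a minimal sketch of the header types as they stand after this patch, reconstructed from the drbd_int.h hunks below. It is not the literal kernel text: the kernel's u32/u16/u8 and __packed are shown as their <stdint.h>/GCC equivalents so the snippet stands alone. The two on-the-wire layouts are kept, but they are now wrapped in one generic struct p_header with an anonymous union, so packet structs and the send helpers can embed and pass a single header type instead of casting to struct p_header80.

    #include <stdint.h>

    struct p_header80 {                 /* classic header, payloads up to 64 KiB */
            uint32_t magic;             /* DRBD_MAGIC */
            uint16_t command;
            uint16_t length;            /* bytes of data after this header */
    } __attribute__((packed));

    struct p_header95 {                 /* big-packet header, payloads above 64 KiB */
            uint16_t magic;             /* DRBD_MAGIC_BIG */
            uint16_t command;
            uint32_t length;
            uint8_t  payload[0];
    } __attribute__((packed));

    struct p_header {                   /* generic header introduced by this patch */
            union {
                    struct p_header80 h80;
                    struct p_header95 h95;
            };
            uint8_t payload[0];
    };

Call sites that still need to fill in the 8-byte layout explicitly (the handshake, drbd_send_drequest_csum(), drbd_send_dblock()) address it as head.h80, as the hunks below show.
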
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 526928c368c9..dc669dfe5b0d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -338,7 +338,6 @@ struct p_header80 {
         u32 magic;
         u16 command;
         u16 length; /* bytes of data after this header */
-        u8 payload[0];
 } __packed;
 
 /* Header for big packets, Used for data packets exceeding 64kB */
@@ -349,9 +348,12 @@ struct p_header95 {
         u8 payload[0];
 } __packed;
 
-union p_header {
-        struct p_header80 h80;
-        struct p_header95 h95;
+struct p_header {
+        union {
+                struct p_header80 h80;
+                struct p_header95 h95;
+        };
+        u8 payload[0];
 };
 
 /*
@@ -380,7 +382,7 @@ union p_header {
 #define DP_DISCARD 64 /* equals REQ_DISCARD */
 
 struct p_data {
-        union p_header head;
+        struct p_header head;
         u64 sector;    /* 64 bits sector number */
         u64 block_id;  /* to identify the request in protocol B&C */
         u32 seq_num;
@@ -396,7 +398,7 @@ struct p_data {
  *   P_DATA_REQUEST, P_RS_DATA_REQUEST
  */
 struct p_block_ack {
-        struct p_header80 head;
+        struct p_header head;
         u64 sector;
         u64 block_id;
         u32 blksize;
@@ -405,7 +407,7 @@ struct p_block_ack {
 
 
 struct p_block_req {
-        struct p_header80 head;
+        struct p_header head;
         u64 sector;
         u64 block_id;
         u32 blksize;
@@ -422,7 +424,7 @@ struct p_block_req {
  */
 
 struct p_handshake {
-        struct p_header80 head; /* 8 bytes */
+        struct p_header head;   /* Note: You must always use a h80 here */
         u32 protocol_min;
         u32 feature_flags;
         u32 protocol_max;
@@ -437,19 +439,19 @@ struct p_handshake {
 /* 80 bytes, FIXED for the next century */
 
 struct p_barrier {
-        struct p_header80 head;
+        struct p_header head;
         u32 barrier;    /* barrier number _handle_ only */
         u32 pad;        /* to multiple of 8 Byte */
 } __packed;
 
 struct p_barrier_ack {
-        struct p_header80 head;
+        struct p_header head;
         u32 barrier;
         u32 set_size;
 } __packed;
 
 struct p_rs_param {
-        struct p_header80 head;
+        struct p_header head;
         u32 rate;
 
         /* Since protocol version 88 and higher. */
@@ -457,7 +459,7 @@ struct p_rs_param {
 } __packed;
 
 struct p_rs_param_89 {
-        struct p_header80 head;
+        struct p_header head;
         u32 rate;
         /* protocol version 89: */
         char verify_alg[SHARED_SECRET_MAX];
@@ -465,7 +467,7 @@ struct p_rs_param_89 {
 } __packed;
 
 struct p_rs_param_95 {
-        struct p_header80 head;
+        struct p_header head;
         u32 rate;
         char verify_alg[SHARED_SECRET_MAX];
         char csums_alg[SHARED_SECRET_MAX];
@@ -481,7 +483,7 @@ enum drbd_conn_flags {
 };
 
 struct p_protocol {
-        struct p_header80 head;
+        struct p_header head;
         u32 protocol;
         u32 after_sb_0p;
         u32 after_sb_1p;
@@ -495,17 +497,17 @@ struct p_protocol {
 } __packed;
 
 struct p_uuids {
-        struct p_header80 head;
+        struct p_header head;
         u64 uuid[UI_EXTENDED_SIZE];
 } __packed;
 
 struct p_rs_uuid {
-        struct p_header80 head;
+        struct p_header head;
         u64 uuid;
 } __packed;
 
 struct p_sizes {
-        struct p_header80 head;
+        struct p_header head;
         u64 d_size;  /* size of disk */
         u64 u_size;  /* user requested size */
         u64 c_size;  /* current exported size */
@@ -515,18 +517,18 @@ struct p_sizes {
 } __packed;
 
 struct p_state {
-        struct p_header80 head;
+        struct p_header head;
         u32 state;
 } __packed;
 
 struct p_req_state {
-        struct p_header80 head;
+        struct p_header head;
         u32 mask;
         u32 val;
 } __packed;
 
 struct p_req_state_reply {
-        struct p_header80 head;
+        struct p_header head;
         u32 retcode;
 } __packed;
 
@@ -541,14 +543,14 @@ struct p_drbd06_param {
 } __packed;
 
 struct p_discard {
-        struct p_header80 head;
+        struct p_header head;
         u64 block_id;
         u32 seq_num;
         u32 pad;
 } __packed;
 
 struct p_block_desc {
-        struct p_header80 head;
+        struct p_header head;
         u64 sector;
         u32 blksize;
         u32 pad;        /* to multiple of 8 Byte */
@@ -564,7 +566,7 @@ enum drbd_bitmap_code {
 };
 
 struct p_compressed_bm {
-        struct p_header80 head;
+        struct p_header head;
         /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
          * (encoding & 0x80): polarity (set/unset) of first runlength
          * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
@@ -576,7 +578,7 @@ struct p_compressed_bm {
 } __packed;
 
 struct p_delay_probe93 {
-        struct p_header80 head;
+        struct p_header head;
         u32 seq_num; /* sequence number to match the two probe packets */
         u32 offset;  /* usecs the probe got sent after the reference time point */
 } __packed;
@@ -625,7 +627,7 @@ DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
  * so we need to use the fixed size 4KiB page size
  * most architectures have used for a long time.
  */
-#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
+#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
 #define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
 #define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
 #if (PAGE_SIZE < 4096)
@@ -634,7 +636,7 @@ DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
 #endif
 
 union p_polymorph {
-        union p_header header;
+        struct p_header header;
         struct p_handshake handshake;
         struct p_data data;
         struct p_block_ack block_ack;
@@ -1245,12 +1247,12 @@ extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_f
 extern int _drbd_send_state(struct drbd_conf *mdev);
 extern int drbd_send_state(struct drbd_conf *mdev);
 extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
-                        enum drbd_packets cmd, struct p_header80 *h,
+                        enum drbd_packets cmd, struct p_header *h,
                         size_t size, unsigned msg_flags);
 #define USE_DATA_SOCKET 1
 #define USE_META_SOCKET 0
 extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
-                        enum drbd_packets cmd, struct p_header80 *h,
+                        enum drbd_packets cmd, struct p_header *h,
                         size_t size);
 extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
                         char *data, size_t size);
@@ -2019,19 +2021,19 @@ static inline void request_ping(struct drbd_conf *mdev)
 static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
         enum drbd_packets cmd)
 {
-        struct p_header80 h;
+        struct p_header h;
         return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
 }
 
 static inline int drbd_send_ping(struct drbd_conf *mdev)
 {
-        struct p_header80 h;
+        struct p_header h;
         return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
 }
 
 static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
 {
-        struct p_header80 h;
+        struct p_header h;
         return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
 }
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 699f63929c1c..55ce48e24b8e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1822,9 +1822,10 @@ void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
 
 /* the appropriate socket mutex must be held already */
 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
-                  enum drbd_packets cmd, struct p_header80 *h,
+                  enum drbd_packets cmd, struct p_header *hg,
                   size_t size, unsigned msg_flags)
 {
+        struct p_header80 *h = (struct p_header80 *)hg;
         int sent, ok;
 
         if (!expect(h))
@@ -1849,7 +1850,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
  * when we hold the appropriate socket mutex.
  */
 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
-                  enum drbd_packets cmd, struct p_header80 *h, size_t size)
+                  enum drbd_packets cmd, struct p_header *h, size_t size)
 {
         int ok = 0;
         struct socket *sock;
@@ -1983,8 +1984,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
         if (mdev->tconn->agreed_pro_version >= 87)
                 strcpy(p->integrity_alg, mdev->tconn->net_conf->integrity_alg);
 
-        rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
-                           (struct p_header80 *)p, size);
+        rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL, &p->head, size);
         kfree(p);
         return rv;
 }
@@ -2009,8 +2009,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
 
         put_ldev(mdev);
 
-        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
-                             (struct p_header80 *)&p, sizeof(p));
+        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, &p.head, sizeof(p));
 }
 
 int drbd_send_uuids(struct drbd_conf *mdev)
@@ -2054,8 +2053,7 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
         drbd_md_sync(mdev);
         p.uuid = cpu_to_be64(uuid);
 
-        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
-                             (struct p_header80 *)&p, sizeof(p));
+        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, &p.head, sizeof(p));
 }
 
 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
@@ -2087,8 +2085,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
         p.queue_order_type = cpu_to_be16(q_order_type);
         p.dds_flags = cpu_to_be16(flags);
 
-        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
-                           (struct p_header80 *)&p, sizeof(p));
+        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, &p.head, sizeof(p));
         return ok;
 }
 
@@ -2112,8 +2109,7 @@ int drbd_send_state(struct drbd_conf *mdev)
         sock = mdev->tconn->data.socket;
 
         if (likely(sock != NULL)) {
-                ok = _drbd_send_cmd(mdev, sock, P_STATE,
-                                    (struct p_header80 *)&p, sizeof(p), 0);
+                ok = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
         }
 
         mutex_unlock(&mdev->tconn->data.mutex);
@@ -2130,8 +2126,7 @@ int drbd_send_state_req(struct drbd_conf *mdev,
         p.mask = cpu_to_be32(mask.i);
         p.val = cpu_to_be32(val.i);
 
-        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
-                             (struct p_header80 *)&p, sizeof(p));
+        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ, &p.head, sizeof(p));
 }
 
 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
@@ -2140,8 +2135,7 @@ int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
 
         p.retcode = cpu_to_be32(retcode);
 
-        return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
-                             (struct p_header80 *)&p, sizeof(p));
+        return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, &p.head, sizeof(p));
 }
 
 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
@@ -2246,7 +2240,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
  */
 static int
 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
-                         struct p_header80 *h, struct bm_xfer_ctx *c)
+                         struct p_header *h, struct bm_xfer_ctx *c)
 {
         struct p_compressed_bm *p = (void*)h;
         unsigned long num_words;
@@ -2300,7 +2294,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
 int _drbd_send_bitmap(struct drbd_conf *mdev)
 {
         struct bm_xfer_ctx c;
-        struct p_header80 *p;
+        struct p_header *p;
         int err;
 
         if (!expect(mdev->bitmap))
@@ -2308,7 +2302,7 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
 
         /* maybe we should use some per thread scratch page,
          * and allocate that during initial device creation? */
-        p = (struct p_header80 *) __get_free_page(GFP_NOIO);
+        p = (struct p_header *) __get_free_page(GFP_NOIO);
         if (!p) {
                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
                 return false;
@@ -2365,8 +2359,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
 
         if (mdev->state.conn < C_CONNECTED)
                 return false;
-        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
-                        (struct p_header80 *)&p, sizeof(p));
+        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, &p.head, sizeof(p));
         return ok;
 }
 
@@ -2393,8 +2386,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
 
         if (!mdev->tconn->meta.socket || mdev->state.conn < C_CONNECTED)
                 return false;
-        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
-                        (struct p_header80 *)&p, sizeof(p));
+        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, &p.head, sizeof(p));
         return ok;
 }
 
@@ -2452,8 +2444,7 @@ int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
         p.block_id = block_id;
         p.blksize = cpu_to_be32(size);
 
-        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
-                        (struct p_header80 *)&p, sizeof(p));
+        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &p.head, sizeof(p));
         return ok;
 }
 
@@ -2469,9 +2460,9 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev,
         p.block_id = ID_SYNCER /* unused */;
         p.blksize = cpu_to_be32(size);
 
-        p.head.magic = cpu_to_be32(DRBD_MAGIC);
-        p.head.command = cpu_to_be16(cmd);
-        p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
+        p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
+        p.head.h80.command = cpu_to_be16(cmd);
+        p.head.h80.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
 
         mutex_lock(&mdev->tconn->data.mutex);
 
@@ -2492,8 +2483,7 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
         p.block_id = ID_SYNCER /* unused */;
         p.blksize = cpu_to_be32(size);
 
-        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
-                        (struct p_header80 *)&p, sizeof(p));
+        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, &p.head, sizeof(p));
         return ok;
 }
 
@@ -2677,12 +2667,12 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
                 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
                 p.head.h80.command = cpu_to_be16(P_DATA);
                 p.head.h80.length =
-                        cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
+                        cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->i.size);
         } else {
                 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
                 p.head.h95.command = cpu_to_be16(P_DATA);
                 p.head.h95.length =
-                        cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
+                        cpu_to_be32(sizeof(p) - sizeof(struct p_header) + dgs + req->i.size);
         }
 
         p.sector = cpu_to_be64(req->i.sector);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 12fdd737cb69..9393fe482efc 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -700,7 +700,7 @@ out:
 static int drbd_send_fp(struct drbd_conf *mdev,
         struct socket *sock, enum drbd_packets cmd)
 {
-        struct p_header80 *h = &mdev->tconn->data.sbuf.header.h80;
+        struct p_header *h = &mdev->tconn->data.sbuf.header;
 
         return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
 }
@@ -925,7 +925,7 @@ out_release_sockets:
 
 static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
 {
-        union p_header *h = &mdev->tconn->data.rbuf.header;
+        struct p_header *h = &mdev->tconn->data.rbuf.header;
         int r;
 
         r = drbd_recv(mdev, h, sizeof(*h));
@@ -3477,7 +3477,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev,
                 const char *direction, struct bm_xfer_ctx *c)
 {
         /* what would it take to transfer it "plaintext" */
-        unsigned plain = sizeof(struct p_header80) *
+        unsigned plain = sizeof(struct p_header) *
                 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
                 + c->bm_words * sizeof(long);
         unsigned total = c->bytes[0] + c->bytes[1];
@@ -3699,7 +3699,7 @@ static struct data_cmd drbd_cmd_handler[] = {
 
 static void drbdd(struct drbd_conf *mdev)
 {
-        union p_header *header = &mdev->tconn->data.rbuf.header;
+        struct p_header *header = &mdev->tconn->data.rbuf.header;
         unsigned int packet_size;
         enum drbd_packets cmd;
         size_t shs; /* sub header size */
@@ -3715,14 +3715,14 @@ static void drbdd(struct drbd_conf *mdev)
                 goto err_out;
         }
 
-        shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
+        shs = drbd_cmd_handler[cmd].pkt_size - sizeof(struct p_header);
         if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
                 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
                 goto err_out;
         }
 
         if (shs) {
-                rv = drbd_recv(mdev, &header->h80.payload, shs);
+                rv = drbd_recv(mdev, &header->payload, shs);
                 if (unlikely(rv != shs)) {
                         if (!signal_pending(current))
                                 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
@@ -3909,8 +3909,8 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
         memset(p, 0, sizeof(*p));
         p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
         p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
-        ok = _drbd_send_cmd( mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
-                             (struct p_header80 *)p, sizeof(*p), 0 );
+        ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
+                            &p->head, sizeof(*p), 0 );
         mutex_unlock(&mdev->tconn->data.mutex);
         return ok;
 }
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 671251af6bcf..afad8ea4d888 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1224,7 +1224,7 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
          * dec_ap_pending will be done in got_BarrierAck
          * or (on connection loss) in w_clear_epoch. */
         ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
-                        (struct p_header80 *)p, sizeof(*p), 0);
+                        &p->head, sizeof(*p), 0);
         drbd_put_data_sock(mdev);
 
         return ok;
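
The net effect on call sites, shown here as a hedged sketch rather than a verbatim excerpt: senders embed the generic header through their packet struct's head member and pass &p.head, instead of casting the whole packet to struct p_header80 *. The helper name below is hypothetical and is modeled on drbd_send_b_ack() in the drbd_main.c hunk above; drbd_send_cmd(), USE_META_SOCKET, P_BARRIER_ACK and struct p_barrier_ack are the kernel's, and error handling is elided.

    /* Sketch only: relies on the declarations this patch touches in drbd_int.h. */
    #include "drbd_int.h"

    static int send_barrier_ack_sketch(struct drbd_conf *mdev,
                                       u32 barrier_nr, u32 set_size)
    {
            struct p_barrier_ack p;

            p.barrier  = cpu_to_be32(barrier_nr);
            p.set_size = cpu_to_be32(set_size);

            /* before this patch: drbd_send_cmd(..., (struct p_header80 *)&p, sizeof(p));
             * after this patch:  pass the embedded generic header directly.            */
            return drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
                                 &p.head, sizeof(p));
    }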