aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ieee802154/at86rf230.c
diff options
context:
space:
mode:
authorAlexander Aring <alex.aring@gmail.com>2015-09-21 03:37:54 -0400
committerMarcel Holtmann <marcel@holtmann.org>2015-09-22 05:51:20 -0400
commit57c1bc7ea8a4857070722c23ce98e01f256c9306 (patch)
treeb6bed2b40a81231d98627b1be5aaf025d68d9bd6 /drivers/net/ieee802154/at86rf230.c
parent02c7b6922899621aa8e8babe27fca7b6b2e497b0 (diff)
at86rf230: support edge triggered irq
This patch adds support for edge triggered irq types. We remove the locking for irq resources by enabling/disabling the irq and allocate some heap buffer directly in the isr. We still have an enable/disable irq path, but this is for level-triggered irqs, which need to be disabled until spi_async clears the irq line. There is usually a small race window between "irq line cleared" and "enable_irq". When an edge triggered irq arrives in this window, we will not recognize this interrupt. This case can't happen on the at86rf230. The reason is that we unmask TRX_END irqs only, which indicate a transmit or receive completion, depending on the current state. On Transmit: TRX_END arrives and the transceiver is in the TX_ARET_ON state again; in this state no other TRX_END can happen until we leave the state. On Receive: This is protected with the RX_SAFE_MODE bit, which leaves the transceiver in RX_AACK_BUSY until we have read the framebuffer. In this state no other TRX_END can happen. Tested with an RPi, where I first detected issues between edge/level irqs. Signed-off-by: Alexander Aring <alex.aring@gmail.com> Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Diffstat (limited to 'drivers/net/ieee802154/at86rf230.c')
-rw-r--r--drivers/net/ieee802154/at86rf230.c211
1 file changed, 89 insertions, 122 deletions
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 9756e6451038..de6e4fa2d6aa 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -81,7 +81,7 @@ struct at86rf230_state_change {
81 u8 from_state; 81 u8 from_state;
82 u8 to_state; 82 u8 to_state;
83 83
84 bool irq_enable; 84 bool free;
85}; 85};
86 86
87struct at86rf230_trac { 87struct at86rf230_trac {
@@ -105,8 +105,6 @@ struct at86rf230_local {
105 struct completion state_complete; 105 struct completion state_complete;
106 struct at86rf230_state_change state; 106 struct at86rf230_state_change state;
107 107
108 struct at86rf230_state_change irq;
109
110 unsigned long cal_timeout; 108 unsigned long cal_timeout;
111 bool is_tx; 109 bool is_tx;
112 bool is_tx_from_off; 110 bool is_tx_from_off;
@@ -122,8 +120,7 @@ struct at86rf230_local {
122static void 120static void
123at86rf230_async_state_change(struct at86rf230_local *lp, 121at86rf230_async_state_change(struct at86rf230_local *lp,
124 struct at86rf230_state_change *ctx, 122 struct at86rf230_state_change *ctx,
125 const u8 state, void (*complete)(void *context), 123 const u8 state, void (*complete)(void *context));
126 const bool irq_enable);
127 124
128static inline void 125static inline void
129at86rf230_sleep(struct at86rf230_local *lp) 126at86rf230_sleep(struct at86rf230_local *lp)
@@ -352,8 +349,10 @@ at86rf230_async_error_recover(void *context)
352 struct at86rf230_local *lp = ctx->lp; 349 struct at86rf230_local *lp = ctx->lp;
353 350
354 lp->is_tx = 0; 351 lp->is_tx = 0;
355 at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false); 352 at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
356 ieee802154_wake_queue(lp->hw); 353 ieee802154_wake_queue(lp->hw);
354 if (ctx->free)
355 kfree(ctx);
357} 356}
358 357
359static inline void 358static inline void
@@ -363,15 +362,14 @@ at86rf230_async_error(struct at86rf230_local *lp,
363 dev_err(&lp->spi->dev, "spi_async error %d\n", rc); 362 dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
364 363
365 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, 364 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
366 at86rf230_async_error_recover, false); 365 at86rf230_async_error_recover);
367} 366}
368 367
369/* Generic function to get some register value in async mode */ 368/* Generic function to get some register value in async mode */
370static void 369static void
371at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg, 370at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg,
372 struct at86rf230_state_change *ctx, 371 struct at86rf230_state_change *ctx,
373 void (*complete)(void *context), 372 void (*complete)(void *context))
374 const bool irq_enable)
375{ 373{
376 int rc; 374 int rc;
377 375
@@ -379,14 +377,24 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
379 377
380 tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG; 378 tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
381 ctx->msg.complete = complete; 379 ctx->msg.complete = complete;
382 ctx->irq_enable = irq_enable;
383 rc = spi_async(lp->spi, &ctx->msg); 380 rc = spi_async(lp->spi, &ctx->msg);
384 if (rc) { 381 if (rc)
385 if (irq_enable) 382 at86rf230_async_error(lp, ctx, rc);
386 enable_irq(ctx->irq); 383}
387 384
385static void
386at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val,
387 struct at86rf230_state_change *ctx,
388 void (*complete)(void *context))
389{
390 int rc;
391
392 ctx->buf[0] = (reg & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
393 ctx->buf[1] = val;
394 ctx->msg.complete = complete;
395 rc = spi_async(lp->spi, &ctx->msg);
396 if (rc)
388 at86rf230_async_error(lp, ctx, rc); 397 at86rf230_async_error(lp, ctx, rc);
389 }
390} 398}
391 399
392static void 400static void
@@ -434,8 +442,7 @@ at86rf230_async_state_assert(void *context)
434 lp->tx_retry++; 442 lp->tx_retry++;
435 443
436 at86rf230_async_state_change(lp, ctx, state, 444 at86rf230_async_state_change(lp, ctx, state,
437 ctx->complete, 445 ctx->complete);
438 ctx->irq_enable);
439 return; 446 return;
440 } 447 }
441 } 448 }
@@ -456,8 +463,7 @@ static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer)
456 struct at86rf230_local *lp = ctx->lp; 463 struct at86rf230_local *lp = ctx->lp;
457 464
458 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, 465 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
459 at86rf230_async_state_assert, 466 at86rf230_async_state_assert);
460 ctx->irq_enable);
461 467
462 return HRTIMER_NORESTART; 468 return HRTIMER_NORESTART;
463} 469}
@@ -562,14 +568,12 @@ at86rf230_async_state_change_start(void *context)
562 struct at86rf230_local *lp = ctx->lp; 568 struct at86rf230_local *lp = ctx->lp;
563 u8 *buf = ctx->buf; 569 u8 *buf = ctx->buf;
564 const u8 trx_state = buf[1] & TRX_STATE_MASK; 570 const u8 trx_state = buf[1] & TRX_STATE_MASK;
565 int rc;
566 571
567 /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ 572 /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
568 if (trx_state == STATE_TRANSITION_IN_PROGRESS) { 573 if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
569 udelay(1); 574 udelay(1);
570 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, 575 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
571 at86rf230_async_state_change_start, 576 at86rf230_async_state_change_start);
572 ctx->irq_enable);
573 return; 577 return;
574 } 578 }
575 579
@@ -586,31 +590,20 @@ at86rf230_async_state_change_start(void *context)
586 /* Going into the next step for a state change which do a timing 590 /* Going into the next step for a state change which do a timing
587 * relevant delay. 591 * relevant delay.
588 */ 592 */
589 buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; 593 at86rf230_async_write_reg(lp, RG_TRX_STATE, ctx->to_state, ctx,
590 buf[1] = ctx->to_state; 594 at86rf230_async_state_delay);
591 ctx->msg.complete = at86rf230_async_state_delay;
592 rc = spi_async(lp->spi, &ctx->msg);
593 if (rc) {
594 if (ctx->irq_enable)
595 enable_irq(ctx->irq);
596
597 at86rf230_async_error(lp, ctx, rc);
598 }
599} 595}
600 596
601static void 597static void
602at86rf230_async_state_change(struct at86rf230_local *lp, 598at86rf230_async_state_change(struct at86rf230_local *lp,
603 struct at86rf230_state_change *ctx, 599 struct at86rf230_state_change *ctx,
604 const u8 state, void (*complete)(void *context), 600 const u8 state, void (*complete)(void *context))
605 const bool irq_enable)
606{ 601{
607 /* Initialization for the state change context */ 602 /* Initialization for the state change context */
608 ctx->to_state = state; 603 ctx->to_state = state;
609 ctx->complete = complete; 604 ctx->complete = complete;
610 ctx->irq_enable = irq_enable;
611 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, 605 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
612 at86rf230_async_state_change_start, 606 at86rf230_async_state_change_start);
613 irq_enable);
614} 607}
615 608
616static void 609static void
@@ -632,8 +625,7 @@ at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
632 unsigned long rc; 625 unsigned long rc;
633 626
634 at86rf230_async_state_change(lp, &lp->state, state, 627 at86rf230_async_state_change(lp, &lp->state, state,
635 at86rf230_sync_state_change_complete, 628 at86rf230_sync_state_change_complete);
636 false);
637 629
638 rc = wait_for_completion_timeout(&lp->state_complete, 630 rc = wait_for_completion_timeout(&lp->state_complete,
639 msecs_to_jiffies(100)); 631 msecs_to_jiffies(100));
@@ -651,9 +643,8 @@ at86rf230_tx_complete(void *context)
651 struct at86rf230_state_change *ctx = context; 643 struct at86rf230_state_change *ctx = context;
652 struct at86rf230_local *lp = ctx->lp; 644 struct at86rf230_local *lp = ctx->lp;
653 645
654 enable_irq(ctx->irq);
655
656 ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); 646 ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
647 kfree(ctx);
657} 648}
658 649
659static void 650static void
@@ -663,7 +654,7 @@ at86rf230_tx_on(void *context)
663 struct at86rf230_local *lp = ctx->lp; 654 struct at86rf230_local *lp = ctx->lp;
664 655
665 at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, 656 at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
666 at86rf230_tx_complete, true); 657 at86rf230_tx_complete);
667} 658}
668 659
669static void 660static void
@@ -697,8 +688,7 @@ at86rf230_tx_trac_check(void *context)
697 } 688 }
698 } 689 }
699 690
700 at86rf230_async_state_change(lp, &lp->irq, STATE_TX_ON, 691 at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on);
701 at86rf230_tx_on, true);
702} 692}
703 693
704static void 694static void
@@ -706,7 +696,6 @@ at86rf230_rx_read_frame_complete(void *context)
706{ 696{
707 struct at86rf230_state_change *ctx = context; 697 struct at86rf230_state_change *ctx = context;
708 struct at86rf230_local *lp = ctx->lp; 698 struct at86rf230_local *lp = ctx->lp;
709 u8 rx_local_buf[AT86RF2XX_MAX_BUF];
710 const u8 *buf = ctx->buf; 699 const u8 *buf = ctx->buf;
711 struct sk_buff *skb; 700 struct sk_buff *skb;
712 u8 len, lqi; 701 u8 len, lqi;
@@ -718,18 +707,16 @@ at86rf230_rx_read_frame_complete(void *context)
718 } 707 }
719 lqi = buf[2 + len]; 708 lqi = buf[2 + len];
720 709
721 memcpy(rx_local_buf, buf + 2, len);
722 ctx->trx.len = 2;
723 enable_irq(ctx->irq);
724
725 skb = dev_alloc_skb(IEEE802154_MTU); 710 skb = dev_alloc_skb(IEEE802154_MTU);
726 if (!skb) { 711 if (!skb) {
727 dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n"); 712 dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
713 kfree(ctx);
728 return; 714 return;
729 } 715 }
730 716
731 memcpy(skb_put(skb, len), rx_local_buf, len); 717 memcpy(skb_put(skb, len), buf + 2, len);
732 ieee802154_rx_irqsafe(lp->hw, skb, lqi); 718 ieee802154_rx_irqsafe(lp->hw, skb, lqi);
719 kfree(ctx);
733} 720}
734 721
735static void 722static void
@@ -765,21 +752,23 @@ at86rf230_rx_trac_check(void *context)
765 rc = spi_async(lp->spi, &ctx->msg); 752 rc = spi_async(lp->spi, &ctx->msg);
766 if (rc) { 753 if (rc) {
767 ctx->trx.len = 2; 754 ctx->trx.len = 2;
768 enable_irq(ctx->irq);
769 at86rf230_async_error(lp, ctx, rc); 755 at86rf230_async_error(lp, ctx, rc);
770 } 756 }
771} 757}
772 758
773static void 759static void
774at86rf230_irq_trx_end(struct at86rf230_local *lp) 760at86rf230_irq_trx_end(void *context)
775{ 761{
762 struct at86rf230_state_change *ctx = context;
763 struct at86rf230_local *lp = ctx->lp;
764
776 if (lp->is_tx) { 765 if (lp->is_tx) {
777 lp->is_tx = 0; 766 lp->is_tx = 0;
778 at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, 767 at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
779 at86rf230_tx_trac_check, true); 768 at86rf230_tx_trac_check);
780 } else { 769 } else {
781 at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, 770 at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
782 at86rf230_rx_trac_check, true); 771 at86rf230_rx_trac_check);
783 } 772 }
784} 773}
785 774
@@ -789,32 +778,59 @@ at86rf230_irq_status(void *context)
789 struct at86rf230_state_change *ctx = context; 778 struct at86rf230_state_change *ctx = context;
790 struct at86rf230_local *lp = ctx->lp; 779 struct at86rf230_local *lp = ctx->lp;
791 const u8 *buf = ctx->buf; 780 const u8 *buf = ctx->buf;
792 const u8 irq = buf[1]; 781 u8 irq = buf[1];
782
783 enable_irq(lp->spi->irq);
793 784
794 if (irq & IRQ_TRX_END) { 785 if (irq & IRQ_TRX_END) {
795 at86rf230_irq_trx_end(lp); 786 at86rf230_irq_trx_end(ctx);
796 } else { 787 } else {
797 enable_irq(ctx->irq);
798 dev_err(&lp->spi->dev, "not supported irq %02x received\n", 788 dev_err(&lp->spi->dev, "not supported irq %02x received\n",
799 irq); 789 irq);
790 kfree(ctx);
800 } 791 }
801} 792}
802 793
794static void
795at86rf230_setup_spi_messages(struct at86rf230_local *lp,
796 struct at86rf230_state_change *state)
797{
798 state->lp = lp;
799 state->irq = lp->spi->irq;
800 spi_message_init(&state->msg);
801 state->msg.context = state;
802 state->trx.len = 2;
803 state->trx.tx_buf = state->buf;
804 state->trx.rx_buf = state->buf;
805 spi_message_add_tail(&state->trx, &state->msg);
806 hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
807 state->timer.function = at86rf230_async_state_timer;
808}
809
803static irqreturn_t at86rf230_isr(int irq, void *data) 810static irqreturn_t at86rf230_isr(int irq, void *data)
804{ 811{
805 struct at86rf230_local *lp = data; 812 struct at86rf230_local *lp = data;
806 struct at86rf230_state_change *ctx = &lp->irq; 813 struct at86rf230_state_change *ctx;
807 u8 *buf = ctx->buf;
808 int rc; 814 int rc;
809 815
810 disable_irq_nosync(irq); 816 disable_irq_nosync(irq);
811 817
812 buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG; 818 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
819 if (!ctx) {
820 enable_irq(irq);
821 return IRQ_NONE;
822 }
823
824 at86rf230_setup_spi_messages(lp, ctx);
825 /* tell on error handling to free ctx */
826 ctx->free = true;
827
828 ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
813 ctx->msg.complete = at86rf230_irq_status; 829 ctx->msg.complete = at86rf230_irq_status;
814 rc = spi_async(lp->spi, &ctx->msg); 830 rc = spi_async(lp->spi, &ctx->msg);
815 if (rc) { 831 if (rc) {
816 enable_irq(irq);
817 at86rf230_async_error(lp, ctx, rc); 832 at86rf230_async_error(lp, ctx, rc);
833 enable_irq(irq);
818 return IRQ_NONE; 834 return IRQ_NONE;
819 } 835 }
820 836
@@ -826,21 +842,14 @@ at86rf230_write_frame_complete(void *context)
826{ 842{
827 struct at86rf230_state_change *ctx = context; 843 struct at86rf230_state_change *ctx = context;
828 struct at86rf230_local *lp = ctx->lp; 844 struct at86rf230_local *lp = ctx->lp;
829 u8 *buf = ctx->buf;
830 int rc;
831 845
832 ctx->trx.len = 2; 846 ctx->trx.len = 2;
833 847
834 if (gpio_is_valid(lp->slp_tr)) { 848 if (gpio_is_valid(lp->slp_tr))
835 at86rf230_slp_tr_rising_edge(lp); 849 at86rf230_slp_tr_rising_edge(lp);
836 } else { 850 else
837 buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; 851 at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx,
838 buf[1] = STATE_BUSY_TX; 852 NULL);
839 ctx->msg.complete = NULL;
840 rc = spi_async(lp->spi, &ctx->msg);
841 if (rc)
842 at86rf230_async_error(lp, ctx, rc);
843 }
844} 853}
845 854
846static void 855static void
@@ -873,7 +882,7 @@ at86rf230_xmit_tx_on(void *context)
873 struct at86rf230_local *lp = ctx->lp; 882 struct at86rf230_local *lp = ctx->lp;
874 883
875 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, 884 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
876 at86rf230_write_frame, false); 885 at86rf230_write_frame);
877} 886}
878 887
879static void 888static void
@@ -886,12 +895,10 @@ at86rf230_xmit_start(void *context)
886 if (lp->is_tx_from_off) { 895 if (lp->is_tx_from_off) {
887 lp->is_tx_from_off = false; 896 lp->is_tx_from_off = false;
888 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, 897 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
889 at86rf230_write_frame, 898 at86rf230_write_frame);
890 false);
891 } else { 899 } else {
892 at86rf230_async_state_change(lp, ctx, STATE_TX_ON, 900 at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
893 at86rf230_xmit_tx_on, 901 at86rf230_xmit_tx_on);
894 false);
895 } 902 }
896} 903}
897 904
@@ -914,7 +921,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
914 if (time_is_before_jiffies(lp->cal_timeout)) { 921 if (time_is_before_jiffies(lp->cal_timeout)) {
915 lp->is_tx_from_off = true; 922 lp->is_tx_from_off = true;
916 at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, 923 at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
917 at86rf230_xmit_start, false); 924 at86rf230_xmit_start);
918 } else { 925 } else {
919 at86rf230_xmit_start(ctx); 926 at86rf230_xmit_start(ctx);
920 } 927 }
@@ -1373,10 +1380,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
1373 return rc; 1380 return rc;
1374 1381
1375 irq_type = irq_get_trigger_type(lp->spi->irq); 1382 irq_type = irq_get_trigger_type(lp->spi->irq);
1376 if (irq_type == IRQ_TYPE_EDGE_RISING ||
1377 irq_type == IRQ_TYPE_EDGE_FALLING)
1378 dev_warn(&lp->spi->dev,
1379 "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n");
1380 if (irq_type == IRQ_TYPE_EDGE_FALLING || 1383 if (irq_type == IRQ_TYPE_EDGE_FALLING ||
1381 irq_type == IRQ_TYPE_LEVEL_LOW) 1384 irq_type == IRQ_TYPE_LEVEL_LOW)
1382 irq_pol = IRQ_ACTIVE_LOW; 1385 irq_pol = IRQ_ACTIVE_LOW;
@@ -1602,43 +1605,6 @@ not_supp:
1602 return rc; 1605 return rc;
1603} 1606}
1604 1607
1605static void
1606at86rf230_setup_spi_messages(struct at86rf230_local *lp)
1607{
1608 lp->state.lp = lp;
1609 lp->state.irq = lp->spi->irq;
1610 spi_message_init(&lp->state.msg);
1611 lp->state.msg.context = &lp->state;
1612 lp->state.trx.len = 2;
1613 lp->state.trx.tx_buf = lp->state.buf;
1614 lp->state.trx.rx_buf = lp->state.buf;
1615 spi_message_add_tail(&lp->state.trx, &lp->state.msg);
1616 hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1617 lp->state.timer.function = at86rf230_async_state_timer;
1618
1619 lp->irq.lp = lp;
1620 lp->irq.irq = lp->spi->irq;
1621 spi_message_init(&lp->irq.msg);
1622 lp->irq.msg.context = &lp->irq;
1623 lp->irq.trx.len = 2;
1624 lp->irq.trx.tx_buf = lp->irq.buf;
1625 lp->irq.trx.rx_buf = lp->irq.buf;
1626 spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
1627 hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1628 lp->irq.timer.function = at86rf230_async_state_timer;
1629
1630 lp->tx.lp = lp;
1631 lp->tx.irq = lp->spi->irq;
1632 spi_message_init(&lp->tx.msg);
1633 lp->tx.msg.context = &lp->tx;
1634 lp->tx.trx.len = 2;
1635 lp->tx.trx.tx_buf = lp->tx.buf;
1636 lp->tx.trx.rx_buf = lp->tx.buf;
1637 spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
1638 hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1639 lp->tx.timer.function = at86rf230_async_state_timer;
1640}
1641
1642#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS 1608#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
1643static struct dentry *at86rf230_debugfs_root; 1609static struct dentry *at86rf230_debugfs_root;
1644 1610
@@ -1760,7 +1726,8 @@ static int at86rf230_probe(struct spi_device *spi)
1760 goto free_dev; 1726 goto free_dev;
1761 } 1727 }
1762 1728
1763 at86rf230_setup_spi_messages(lp); 1729 at86rf230_setup_spi_messages(lp, &lp->state);
1730 at86rf230_setup_spi_messages(lp, &lp->tx);
1764 1731
1765 rc = at86rf230_detect_device(lp); 1732 rc = at86rf230_detect_device(lp);
1766 if (rc < 0) 1733 if (rc < 0)