author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 01:06:28 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 01:06:28 -0500
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /drivers/dma/dmaengine.c
parent		f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent		4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
arch/sh/kernel/cpu/sh2/setup-sh7619.c
arch/sh/kernel/cpu/sh2a/setup-mxg.c
arch/sh/kernel/cpu/sh2a/setup-sh7201.c
arch/sh/kernel/cpu/sh2a/setup-sh7203.c
arch/sh/kernel/cpu/sh2a/setup-sh7206.c
arch/sh/kernel/cpu/sh3/setup-sh7705.c
arch/sh/kernel/cpu/sh3/setup-sh770x.c
arch/sh/kernel/cpu/sh3/setup-sh7710.c
arch/sh/kernel/cpu/sh3/setup-sh7720.c
arch/sh/kernel/cpu/sh4/setup-sh4-202.c
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/cpu/sh4/setup-sh7760.c
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
arch/sh/kernel/cpu/sh4a/setup-sh7723.c
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
arch/sh/kernel/cpu/sh4a/setup-sh7786.c
arch/sh/kernel/cpu/sh4a/setup-shx3.c
arch/sh/kernel/cpu/sh5/setup-sh5.c
drivers/serial/sh-sci.c
drivers/serial/sh-sci.h
include/linux/serial_sci.h
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	170
1 file changed, 97 insertions(+), 73 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 5a87384ea4ff..8bcb15fb959d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -58,6 +58,7 @@
 #include <linux/jiffies.h>
 #include <linux/rculist.h>
 #include <linux/idr.h>
+#include <linux/slab.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -284,7 +285,7 @@ struct dma_chan_tbl_ent {
 /**
  * channel_table - percpu lookup table for memory-to-memory offload providers
  */
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 
 static int __init dma_channel_table_init(void)
 {
@@ -326,14 +327,7 @@ arch_initcall(dma_channel_table_init);
  */
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
-	struct dma_chan *chan;
-	int cpu;
-
-	cpu = get_cpu();
-	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
-	put_cpu();
-
-	return chan;
+	return this_cpu_read(channel_table[tx_type]->chan);
 }
 EXPORT_SYMBOL(dma_find_channel);
 
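[Annotation] The hunk above replaces the open-coded get_cpu()/per_cpu_ptr()/put_cpu() sequence with a single this_cpu_read(), which is preemption-safe on its own. A minimal sketch of the two patterns, using a hypothetical percpu table named slots (not the real channel_table):

/* Sketch only: hypothetical percpu slot, for illustration. */
struct slot {
	struct dma_chan *chan;
};
static struct slot __percpu *slots;

/* Old pattern: pin the CPU for the whole lookup. */
static struct dma_chan *lookup_old(void)
{
	struct dma_chan *chan;
	int cpu;

	cpu = get_cpu();			/* disables preemption */
	chan = per_cpu_ptr(slots, cpu)->chan;
	put_cpu();				/* re-enables preemption */
	return chan;
}

/* New pattern: one preemption-safe read of this CPU's copy. */
static struct dma_chan *lookup_new(void)
{
	return this_cpu_read(slots->chan);
}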
@@ -521,7 +515,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				break;
 			if (--device->privatecnt == 0)
 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-			chan->private = NULL;
 			chan = NULL;
 		}
 	}
@@ -543,7 +536,6 @@ void dma_release_channel(struct dma_chan *chan)
 	/* drop PRIVATE cap enabled by __dma_request_channel() */
 	if (--chan->device->privatecnt == 0)
 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
-	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -608,6 +600,50 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static bool device_has_all_tx_types(struct dma_device *device)
+{
+	/* A device that satisfies this test has channels that will never cause
+	 * an async_tx channel switch event as all possible operation types can
+	 * be handled.
+	 */
+	#ifdef CONFIG_ASYNC_TX_DMA
+	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
+	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
+		return false;
+	#endif
+
+	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+	if (!dma_has_cap(DMA_XOR, device->cap_mask))
+		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+		return false;
+	#endif
+	#endif
+
+	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+	if (!dma_has_cap(DMA_PQ, device->cap_mask))
+		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+		return false;
+	#endif
+	#endif
+
+	return true;
+}
+
 static int get_dma_id(struct dma_device *device)
 {
 	int rc;
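[Annotation] device_has_all_tx_types() is built entirely from dma_has_cap() tests against device->cap_mask, and the next hunk uses its result to set DMA_ASYNC_TX. A small sketch of the capability-mask primitives it relies on (dma_cap_zero(), dma_cap_set() and dma_has_cap() are real dmaengine macros; the surrounding function is illustrative only):

#include <linux/dmaengine.h>

static bool demo_caps(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);		/* clear all capability bits */
	dma_cap_set(DMA_MEMCPY, mask);	/* advertise memcpy offload */
	dma_cap_set(DMA_XOR, mask);	/* advertise xor offload */

	/* the same test device_has_all_tx_types() applies per type */
	return dma_has_cap(DMA_MEMCPY, mask) && dma_has_cap(DMA_XOR, mask);
}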
@@ -644,23 +680,37 @@ int dma_async_device_register(struct dma_device *device)
 	       !device->device_prep_dma_memcpy);
 	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 	       !device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
-	       !device->device_prep_dma_zero_sum);
+	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
+	       !device->device_prep_dma_xor_val);
+	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
+	       !device->device_prep_dma_pq);
+	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
+	       !device->device_prep_dma_pq_val);
 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 	       !device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 	       !device->device_prep_dma_interrupt);
+	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+	       !device->device_prep_dma_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 	       !device->device_prep_slave_sg);
+	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+	       !device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-	       !device->device_terminate_all);
+	       !device->device_control);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_is_tx_complete);
+	BUG_ON(!device->device_tx_status);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
+	/* note: this only matters in the
+	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
+	 */
+	if (device_has_all_tx_types(device))
+		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
+
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
@@ -779,6 +829,7 @@ void dma_async_device_unregister(struct dma_device *device)
 		chan->dev->chan = NULL;
 		mutex_unlock(&dma_list_mutex);
 		device_unregister(&chan->dev->device);
+		free_percpu(chan->local);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
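[Annotation] The free_percpu(chan->local) added above balances the alloc_percpu() done per channel at registration time. The pairing in isolation, as a sketch with a hypothetical counter struct (not the real chan->local type):

#include <linux/percpu.h>

struct demo_stats {
	unsigned long bytes;
};

static struct demo_stats __percpu *demo_register(void)
{
	/* one zero-initialized instance per possible CPU */
	return alloc_percpu(struct demo_stats);
}

static void demo_unregister(struct demo_stats __percpu *stats)
{
	free_percpu(stats);	/* must pair with alloc_percpu() */
}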
@@ -803,7 +854,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
-	int cpu;
 	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -822,10 +872,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();
 
 	return cookie;
 }
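[Annotation] The double-underscore __this_cpu_*() operations above assume preemption is already disabled, which is why a single preempt_disable()/preempt_enable() pair now brackets both counter updates instead of get_cpu()/put_cpu() around per_cpu_ptr() arithmetic. The same hunk repeats for the buf_to_pg and pg_to_pg variants below. The pattern in isolation (hypothetical stats struct):

#include <linux/percpu.h>
#include <linux/preempt.h>

struct xfer_stats {
	unsigned long bytes_transferred;
	unsigned long memcpy_count;
};
static struct xfer_stats __percpu *xfer_stats;

static void account_xfer(size_t len)
{
	/* __this_cpu_add()/__this_cpu_inc() are only safe with preemption
	 * off; disable it once around both updates.
	 */
	preempt_disable();
	__this_cpu_add(xfer_stats->bytes_transferred, len);
	__this_cpu_inc(xfer_stats->memcpy_count);
	preempt_enable();
}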
@@ -852,7 +902,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
-	int cpu;
 	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -869,10 +918,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();
 
 	return cookie;
 }
@@ -901,7 +950,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	struct dma_async_tx_descriptor *tx;
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
-	int cpu;
 	unsigned long flags;
 
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -919,10 +967,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	tx->callback = NULL;
 	cookie = tx->tx_submit(tx);
 
-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();
 
 	return cookie;
 }
@@ -932,56 +980,32 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan)
 {
 	tx->chan = chan;
+	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	spin_lock_init(&tx->lock);
-	INIT_LIST_HEAD(&tx->tx_list);
+	#endif
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
- *
- * This routine assumes that tx was obtained from a call to async_memcpy,
- * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
- * and submitted). Walking the parent chain is only meant to cover for DMA
- * drivers that do not implement the DMA_INTERRUPT capability and may race with
- * the driver's descriptor cleanup routine.
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
+	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
 	if (!tx)
 		return DMA_SUCCESS;
 
-	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-		  " %s\n", __func__, dma_chan_name(tx->chan));
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
+	while (tx->cookie == -EBUSY) {
+		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+			pr_err("%s timeout waiting for descriptor submission\n",
+			       __func__);
+			return DMA_ERROR;
+		}
+		cpu_relax();
+	}
+	return dma_sync_wait(tx->chan, tx->cookie);
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 
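[Annotation] The rewritten dma_wait_for_async_tx() no longer walks a parent chain; it only waits out the brief -EBUSY submission window and then hands off to dma_sync_wait(), now with a 5-second bailout instead of spinning forever. For reference, a sketch of the plain submit-then-wait flow those helpers support (dma_async_issue_pending() and dma_sync_wait() are real dmaengine calls; the wrapper function and error handling are illustrative):

static enum dma_status demo_submit_and_wait(struct dma_chan *chan,
					    struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = tx->tx_submit(tx);	/* queue the prepped descriptor */
	dma_async_issue_pending(chan);	/* flush it to the hardware */

	/* poll this channel/cookie until completion or timeout */
	return dma_sync_wait(chan, cookie);
}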
@@ -991,7 +1015,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
  */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *dep = tx->next;
+	struct dma_async_tx_descriptor *dep = txd_next(tx);
 	struct dma_async_tx_descriptor *dep_next;
 	struct dma_chan *chan;
 
@@ -999,7 +1023,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 		return;
 
 	/* we'll submit tx->next now, so clear the link */
-	tx->next = NULL;
+	txd_clear_next(tx);
 	chan = dep->chan;
 
 	/* keep submitting up until a channel switch is detected
@@ -1007,14 +1031,14 @@
 	 * processing the interrupt from async_tx_channel_switch
 	 */
 	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
+		txd_lock(dep);
+		txd_clear_parent(dep);
+		dep_next = txd_next(dep);
 		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
+			txd_clear_next(dep); /* ->next will be submitted */
 		else
 			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
+		txd_unlock(dep);
 
 		dep->tx_submit(dep);
 	}
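[Annotation] The txd_lock()/txd_unlock()/txd_clear_parent()/txd_next()/txd_clear_next() calls are accessors from <linux/dmaengine.h> that compile to no-ops when CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH is not set, letting the descriptor drop its lock and chain fields entirely. A simplified sketch of that shape, following the lock pair only (the real header handles the parent/next accessors the same way):

#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}
#else
/* no channel switching: the fields do not exist, accessors vanish */
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
}
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
}
#endif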