author     Roland Dreier <rolandd@cisco.com>  2008-01-25 17:15:42 -0500
committer  Roland Dreier <rolandd@cisco.com>  2008-01-25 17:15:42 -0500
commit     2fe7e6f7c9f55eac24c5b3cdf56af29ab9b0ca81 (patch)
tree       0e3712ae9033d133e839a22ac28144215a7ece4a
parent     cf9542aa923982428fbf6a6f815c32ae2c3da8c7 (diff)
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there have been multiple reports of deadlocks and lockdep
warnings. In particular, it seems that a single thread may end up
trying to take the same rwsem for reading more than once, which is
explicitly forbidden in the comments in <linux/rwsem.h>.
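For illustration only, a minimal sketch of that forbidden pattern (hypothetical code, not taken from user_mad.c): a task that re-acquires the same rwsem for reading can deadlock if a writer queues up between the two down_read() calls, because the writer waits for the first read lock while the second down_read() waits behind the writer.

```c
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);      /* hypothetical rwsem, not from user_mad.c */

static void recursive_read_lock_bug(void)
{
        down_read(&example_sem);        /* first read lock */
        /*
         * If another task calls down_write(&example_sem) here, the
         * second down_read() below queues behind that waiting writer,
         * and the writer waits for the first read lock: deadlock.
         */
        down_read(&example_sem);        /* same task, same rwsem: forbidden */
        up_read(&example_sem);
        up_read(&example_sem);
}
```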
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and the list
of queued packets; and there is one mutex per struct ib_umad_port,
which protects the port's contents, including its list of open files.
We never hold the file mutex across calls to functions like
ib_unregister_mad_agent(), which can call back into other ib_umad code
to queue a packet, and we hold the port mutex whenever we need to make
sure that the device is not hot-unplugged from under us.
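As a condensed sketch of the resulting lock ordering (based on the ib_umad_unreg_agent() path in the diff below; the wrapper function name is illustrative and error handling is elided):

```c
/* Illustrative only: condensed from ib_umad_unreg_agent() in the diff below. */
static void example_unregister(struct ib_umad_file *file, u32 id)
{
        struct ib_mad_agent *agent;

        mutex_lock(&file->port->file_mutex);    /* outer: pins the port against hot-unplug */
        mutex_lock(&file->mutex);               /* inner: protects file->agent[] and the packet lists */

        agent = __get_agent(file, id);
        file->agent[id] = NULL;

        mutex_unlock(&file->mutex);             /* dropped before the call that may re-enter queue_packet() */

        if (agent)
                ib_unregister_mad_agent(agent);

        mutex_unlock(&file->port->file_mutex);
}
```

The same ordering (port file_mutex outer, file mutex inner, file mutex dropped before ib_unregister_mad_agent()) is used in ib_umad_close() and ib_umad_kill_port() in the diff below.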
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  drivers/infiniband/core/user_mad.c | 115
1 file changed, 53 insertions(+), 62 deletions(-)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index b53eac4611de..4e915104ac4c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -2,6 +2,7 @@
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008 Cisco. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -42,7 +43,7 @@
 #include <linux/cdev.h>
 #include <linux/dma-mapping.h>
 #include <linux/poll.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <linux/kref.h>
 #include <linux/compat.h>

@@ -94,7 +95,7 @@ struct ib_umad_port {
         struct class_device   *sm_class_dev;
         struct semaphore       sm_sem;

-        struct rw_semaphore    mutex;
+        struct mutex           file_mutex;
         struct list_head       file_list;

         struct ib_device      *ib_dev;
@@ -110,11 +111,11 @@ struct ib_umad_device {
 };

 struct ib_umad_file {
+        struct mutex            mutex;
         struct ib_umad_port    *port;
         struct list_head        recv_list;
         struct list_head        send_list;
         struct list_head        port_list;
-        spinlock_t              recv_lock;
         spinlock_t              send_lock;
         wait_queue_head_t       recv_wait;
         struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
@@ -156,7 +157,7 @@ static int hdr_size(struct ib_umad_file *file)
                 sizeof (struct ib_user_mad_hdr_old);
 }

-/* caller must hold port->mutex at least for reading */
+/* caller must hold file->mutex */
 static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
 {
         return file->agents_dead ? NULL : file->agent[id];
@@ -168,32 +169,30 @@ static int queue_packet(struct ib_umad_file *file,
 {
         int ret = 1;

-        down_read(&file->port->mutex);
+        mutex_lock(&file->mutex);

         for (packet->mad.hdr.id = 0;
              packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
              packet->mad.hdr.id++)
                 if (agent == __get_agent(file, packet->mad.hdr.id)) {
-                        spin_lock_irq(&file->recv_lock);
                         list_add_tail(&packet->list, &file->recv_list);
-                        spin_unlock_irq(&file->recv_lock);
                         wake_up_interruptible(&file->recv_wait);
                         ret = 0;
                         break;
                 }

-        up_read(&file->port->mutex);
+        mutex_unlock(&file->mutex);

         return ret;
 }

 static void dequeue_send(struct ib_umad_file *file,
                          struct ib_umad_packet *packet)
 {
         spin_lock_irq(&file->send_lock);
         list_del(&packet->list);
         spin_unlock_irq(&file->send_lock);
 }

 static void send_handler(struct ib_mad_agent *agent,
                          struct ib_mad_send_wc *send_wc)
@@ -341,10 +340,10 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
         if (count < hdr_size(file))
                 return -EINVAL;

-        spin_lock_irq(&file->recv_lock);
+        mutex_lock(&file->mutex);

         while (list_empty(&file->recv_list)) {
-                spin_unlock_irq(&file->recv_lock);
+                mutex_unlock(&file->mutex);

                 if (filp->f_flags & O_NONBLOCK)
                         return -EAGAIN;
@@ -353,13 +352,13 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
                                      !list_empty(&file->recv_list)))
                         return -ERESTARTSYS;

-                spin_lock_irq(&file->recv_lock);
+                mutex_lock(&file->mutex);
         }

         packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
         list_del(&packet->list);

-        spin_unlock_irq(&file->recv_lock);
+        mutex_unlock(&file->mutex);

         if (packet->recv_wc)
                 ret = copy_recv_mad(file, buf, packet, count);
@@ -368,9 +367,9 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,

         if (ret < 0) {
                 /* Requeue packet */
-                spin_lock_irq(&file->recv_lock);
+                mutex_lock(&file->mutex);
                 list_add(&packet->list, &file->recv_list);
-                spin_unlock_irq(&file->recv_lock);
+                mutex_unlock(&file->mutex);
         } else {
                 if (packet->recv_wc)
                         ib_free_recv_mad(packet->recv_wc);
@@ -481,7 +480,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                 goto err;
         }

-        down_read(&file->port->mutex);
+        mutex_lock(&file->mutex);

         agent = __get_agent(file, packet->mad.hdr.id);
         if (!agent) {
@@ -577,7 +576,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         if (ret)
                 goto err_send;

-        up_read(&file->port->mutex);
+        mutex_unlock(&file->mutex);
         return count;

 err_send:
@@ -587,7 +586,7 @@ err_msg:
 err_ah:
         ib_destroy_ah(ah);
 err_up:
-        up_read(&file->port->mutex);
+        mutex_unlock(&file->mutex);
 err:
         kfree(packet);
         return ret;
@@ -613,11 +612,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
 {
         struct ib_user_mad_reg_req ureq;
         struct ib_mad_reg_req req;
-        struct ib_mad_agent *agent;
+        struct ib_mad_agent *agent = NULL;
         int agent_id;
         int ret;

-        down_write(&file->port->mutex);
+        mutex_lock(&file->port->file_mutex);
+        mutex_lock(&file->mutex);

         if (!file->port->ib_dev) {
                 ret = -EPIPE;
@@ -666,13 +666,13 @@ found:
                                       send_handler, recv_handler, file);
         if (IS_ERR(agent)) {
                 ret = PTR_ERR(agent);
+                agent = NULL;
                 goto out;
         }

         if (put_user(agent_id,
                      (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
                 ret = -EFAULT;
-                ib_unregister_mad_agent(agent);
                 goto out;
         }

@@ -690,7 +690,13 @@ found:
         ret = 0;

 out:
-        up_write(&file->port->mutex);
+        mutex_unlock(&file->mutex);
+
+        if (ret && agent)
+                ib_unregister_mad_agent(agent);
+
+        mutex_unlock(&file->port->file_mutex);
+
         return ret;
 }

@@ -703,7 +709,8 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
         if (get_user(id, arg))
                 return -EFAULT;

-        down_write(&file->port->mutex);
+        mutex_lock(&file->port->file_mutex);
+        mutex_lock(&file->mutex);

         if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
                 ret = -EINVAL;
@@ -714,11 +721,13 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
         file->agent[id] = NULL;

 out:
-        up_write(&file->port->mutex);
+        mutex_unlock(&file->mutex);

         if (agent)
                 ib_unregister_mad_agent(agent);

+        mutex_unlock(&file->port->file_mutex);
+
         return ret;
 }

@@ -726,12 +735,12 @@ static long ib_umad_enable_pkey(struct ib_umad_file *file)
 {
         int ret = 0;

-        down_write(&file->port->mutex);
+        mutex_lock(&file->mutex);
         if (file->already_used)
                 ret = -EINVAL;
         else
                 file->use_pkey_index = 1;
-        up_write(&file->port->mutex);
+        mutex_unlock(&file->mutex);

         return ret;
 }
@@ -783,7 +792,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
         if (!port)
                 return -ENXIO;

-        down_write(&port->mutex);
+        mutex_lock(&port->file_mutex);

         if (!port->ib_dev) {
                 ret = -ENXIO;
@@ -797,7 +806,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
                 goto out;
         }

-        spin_lock_init(&file->recv_lock);
+        mutex_init(&file->mutex);
         spin_lock_init(&file->send_lock);
         INIT_LIST_HEAD(&file->recv_list);
         INIT_LIST_HEAD(&file->send_list);
@@ -809,7 +818,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
         list_add_tail(&file->port_list, &port->file_list);

 out:
-        up_write(&port->mutex);
+        mutex_unlock(&port->file_mutex);
         return ret;
 }

@@ -821,7 +830,8 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
         int already_dead;
         int i;

-        down_write(&file->port->mutex);
+        mutex_lock(&file->port->file_mutex);
+        mutex_lock(&file->mutex);

         already_dead = file->agents_dead;
         file->agents_dead = 1;
@@ -834,14 +844,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)

         list_del(&file->port_list);

-        downgrade_write(&file->port->mutex);
+        mutex_unlock(&file->mutex);

         if (!already_dead)
                 for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
                         if (file->agent[i])
                                 ib_unregister_mad_agent(file->agent[i]);

-        up_read(&file->port->mutex);
+        mutex_unlock(&file->port->file_mutex);

         kfree(file);
         kref_put(&dev->ref, ib_umad_release_dev);
@@ -914,10 +924,10 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
         };
         int ret = 0;

-        down_write(&port->mutex);
+        mutex_lock(&port->file_mutex);
         if (port->ib_dev)
                 ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
-        up_write(&port->mutex);
+        mutex_unlock(&port->file_mutex);

         up(&port->sm_sem);

@@ -981,7 +991,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
         port->ib_dev   = device;
         port->port_num = port_num;
         init_MUTEX(&port->sm_sem);
-        init_rwsem(&port->mutex);
+        mutex_init(&port->file_mutex);
         INIT_LIST_HEAD(&port->file_list);

         port->dev = cdev_alloc();
@@ -1052,6 +1062,7 @@ err_cdev:
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
         struct ib_umad_file *file;
+        int already_dead;
         int id;

         class_set_devdata(port->class_dev, NULL);
@@ -1067,42 +1078,22 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
         umad_port[port->dev_num] = NULL;
         spin_unlock(&port_lock);

-        down_write(&port->mutex);
+        mutex_lock(&port->file_mutex);

         port->ib_dev = NULL;

-        /*
-         * Now go through the list of files attached to this port and
-         * unregister all of their MAD agents. We need to hold
-         * port->mutex while doing this to avoid racing with
-         * ib_umad_close(), but we can't hold the mutex for writing
-         * while calling ib_unregister_mad_agent(), since that might
-         * deadlock by calling back into queue_packet(). So we
-         * downgrade our lock to a read lock, and then drop and
-         * reacquire the write lock for the next iteration.
-         *
-         * We do list_del_init() on the file's list_head so that the
-         * list_del in ib_umad_close() is still OK, even after the
-         * file is removed from the list.
-         */
-        while (!list_empty(&port->file_list)) {
-                file = list_entry(port->file_list.next, struct ib_umad_file,
-                                  port_list);
-
+        list_for_each_entry(file, &port->file_list, port_list) {
+                mutex_lock(&file->mutex);
+                already_dead = file->agents_dead;
                 file->agents_dead = 1;
-                list_del_init(&file->port_list);
-
-                downgrade_write(&port->mutex);
+                mutex_unlock(&file->mutex);

                 for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
                         if (file->agent[id])
                                 ib_unregister_mad_agent(file->agent[id]);
-
-                up_read(&port->mutex);
-                down_write(&port->mutex);
         }

-        up_write(&port->mutex);
+        mutex_unlock(&port->file_mutex);

         clear_bit(port->dev_num, dev_map);
 }
