author     Mark Haverkamp <markh@osdl.org>         2005-09-26 16:02:15 -0400
committer  James Bottomley <jejb@mulgrave.(none)>  2005-09-26 18:41:49 -0400
commit     2f130980d14cb938226011875ca5224cd46dc1f9 (patch)
tree       73c5898afdc04a2363369a1e8f1196e02fe41cef /drivers/scsi
parent     7a8cf29d69e077dfe90e327859201fd9b75a47ce (diff)
[SCSI] aacraid: AIF preallocation (update)
Received from Mark Salyzyn of Adaptec.
AIF pre-allocation is used to pull the kmalloc calls outside of the locks (a stand-alone sketch of the pattern follows the diffstat).
Applies to the scsi-misc-2.6 git tree.
Signed-off-by: Mark Haverkamp <markh@osdl.org>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 100
1 file changed, 81 insertions(+), 19 deletions(-)
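
The change the message describes is simple: instead of calling kmalloc(..., GFP_ATOMIC) for each new FIB while dev->fib_lock is held, the thread now sizes a pool under the lock, allocates the pool with GFP_KERNEL before re-taking the lock, hands pool entries out inside the critical section, and frees the leftovers afterwards. Below is a minimal stand-alone sketch of that pattern in user-space C, with a pthread mutex and malloc standing in for the kernel's spinlock and kmalloc; every name in it is illustrative, none of it is driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item { int payload; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned consumers;      /* how many waiters we expect to serve */

static void deliver_locked(struct item *it, unsigned i)
{
	/* Stands in for copying the FIB and waking the waiter; we free
	 * here only to keep the sketch leak-free. */
	it->payload = (int)i;
	printf("delivered item %u\n", i);
	free(it);
}

int main(void)
{
	struct item **pool, **p;
	unsigned num, i;

	consumers = 4;          /* pretend four contexts are registered */

	/* Take the estimate under the lock, as the patch does. */
	pthread_mutex_lock(&list_lock);
	num = consumers;
	pthread_mutex_unlock(&list_lock);

	/* Pre-allocate outside the lock, where blocking is allowed
	 * (GFP_KERNEL territory, in kernel terms). */
	pool = malloc(sizeof(*pool) * num);
	if (!pool)
		return 1;
	for (p = pool; p < &pool[num]; p++) {
		*p = malloc(sizeof(**p));
		if (!*p)
			break;
	}
	num = (unsigned)(p - pool); /* shrink to what we actually got */

	/* Consume inside the critical section: no allocation, no sleep. */
	pthread_mutex_lock(&list_lock);
	p = pool;
	for (i = 0; i < consumers && p < &pool[num]; i++) {
		deliver_locked(*p, i);
		*p++ = NULL;    /* mark the slot consumed */
	}
	pthread_mutex_unlock(&list_lock);

	/* Free whatever was not handed out; free(NULL) is a no-op. */
	for (p = pool; p < &pool[num]; p++)
		free(*p);
	free(pool);
	return 0;
}

The bounds check inside the locked loop (hw_fib_p < &hw_fib_pool[num] in the patch, p < &pool[num] here) is what makes the racy estimate safe: waiters that register after the pool was sized simply miss this round of delivery.
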
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1d303f03480..3741be2f4bfa 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -805,7 +805,6 @@ int aac_command_thread(struct aac_dev * dev)
 {
 	struct hw_fib *hw_fib, *hw_newfib;
 	struct fib *fib, *newfib;
-	struct aac_queue_block *queues = dev->queues;
 	struct aac_fib_context *fibctx;
 	unsigned long flags;
 	DECLARE_WAITQUEUE(wait, current);
@@ -825,21 +824,22 @@ int aac_command_thread(struct aac_dev * dev)
 	 * Let the DPC know it has a place to send the AIF's to.
 	 */
 	dev->aif_thread = 1;
-	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
+	dprintk ((KERN_INFO "aac_command_thread start\n"));
 	while(1)
 	{
-		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
-		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
 			struct list_head *entry;
 			struct aac_aifcmd * aifcmd;
 
 			set_current_state(TASK_RUNNING);
 
-			entry = queues->queue[HostNormCmdQueue].cmdq.next;
+			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
 			list_del(entry);
 
-			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 			fib = list_entry(entry, struct fib, fiblink);
 			/*
 			 * We will process the FIB here or pass it to a
@@ -869,9 +869,54 @@ int aac_command_thread(struct aac_dev * dev)
 
 				u32 time_now, time_last;
 				unsigned long flagv;
+				unsigned num;
+				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+				struct fib ** fib_pool, ** fib_p;
 
 				time_now = jiffies/HZ;
 
+				/*
+				 * Warning: no sleep allowed while
+				 * holding spinlock. We take the estimate
+				 * and pre-allocate a set of fibs outside the
+				 * lock.
+				 */
+				num = le32_to_cpu(dev->init->AdapterFibsSize)
+				    / sizeof(struct hw_fib); /* some extra */
+				spin_lock_irqsave(&dev->fib_lock, flagv);
+				entry = dev->fib_list.next;
+				while (entry != &dev->fib_list) {
+					entry = entry->next;
+					++num;
+				}
+				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+				hw_fib_pool = NULL;
+				fib_pool = NULL;
+				if (num
+				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+					hw_fib_p = hw_fib_pool;
+					fib_p = fib_pool;
+					while (hw_fib_p < &hw_fib_pool[num]) {
+						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+							--hw_fib_p;
+							break;
+						}
+						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+							kfree(*(--hw_fib_p));
+							break;
+						}
+					}
+					if ((num = hw_fib_p - hw_fib_pool) == 0) {
+						kfree(fib_pool);
+						fib_pool = NULL;
+						kfree(hw_fib_pool);
+						hw_fib_pool = NULL;
+					}
+				} else if (hw_fib_pool) {
+					kfree(hw_fib_pool);
+					hw_fib_pool = NULL;
+				}
 				spin_lock_irqsave(&dev->fib_lock, flagv);
 				entry = dev->fib_list.next;
 				/*
@@ -880,6 +925,8 @@ int aac_command_thread(struct aac_dev * dev)
 				 * fib, and then set the event to wake up the
 				 * thread that is waiting for it.
 				 */
+				hw_fib_p = hw_fib_pool;
+				fib_p = fib_pool;
 				while (entry != &dev->fib_list) {
 					/*
 					 * Extract the fibctx
@@ -912,9 +959,11 @@ int aac_command_thread(struct aac_dev * dev)
 					 * Warning: no sleep allowed while
 					 * holding spinlock
 					 */
-					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
-					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
-					if (newfib && hw_newfib) {
+					if (hw_fib_p < &hw_fib_pool[num]) {
+						hw_newfib = *hw_fib_p;
+						*(hw_fib_p++) = NULL;
+						newfib = *fib_p;
+						*(fib_p++) = NULL;
 						/*
 						 * Make the copy of the FIB
 						 */
@@ -929,15 +978,11 @@ int aac_command_thread(struct aac_dev * dev)
 						fibctx->count++;
 						/*
 						 * Set the event to wake up the
-						 * thread that will waiting.
+						 * thread that is waiting.
 						 */
 						up(&fibctx->wait_sem);
 					} else {
 						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
-						if(newfib)
-							kfree(newfib);
-						if(hw_newfib)
-							kfree(hw_newfib);
 					}
 					entry = entry->next;
 				}
@@ -947,21 +992,38 @@ int aac_command_thread(struct aac_dev * dev)
 				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
 				fib_adapter_complete(fib, sizeof(u32));
 				spin_unlock_irqrestore(&dev->fib_lock, flagv);
+				/* Free up the remaining resources */
+				hw_fib_p = hw_fib_pool;
+				fib_p = fib_pool;
+				while (hw_fib_p < &hw_fib_pool[num]) {
+					if (*hw_fib_p)
+						kfree(*hw_fib_p);
+					if (*fib_p)
+						kfree(*fib_p);
+					++fib_p;
+					++hw_fib_p;
+				}
+				if (hw_fib_pool)
+					kfree(hw_fib_pool);
+				if (fib_pool)
+					kfree(fib_pool);
 			}
-			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
 			kfree(fib);
+			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
 		}
 		/*
 		 * There are no more AIF's
 		 */
-		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 		schedule();
 
 		if(signal_pending(current))
 			break;
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
-	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+	if (dev->queues)
+		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
 	dev->aif_thread = 0;
 	complete_and_exit(&dev->aif_completion, 0);
+	return 0;
 }
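
Beyond the preallocation itself, the diff carries small companion fixes: kfree(fib) now runs before the queue lock is re-taken rather than under it, remove_wait_queue() is guarded by if (dev->queues) so the thread exits cleanly if the queues were already torn down, and an explicit return 0 follows complete_and_exit(). All follow the same theme as the sketch above: keep the spinlock-protected sections as small and as safe as possible.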