commit     5047887caf1806f31652210df27fb62a7c43f27d
author     Linus Torvalds <torvalds@linux-foundation.org>	2008-07-25 14:08:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>	2008-07-25 14:08:17 -0400
tree       4098ead40c1aa7b904167f67cff87a247cfa0b6c /drivers
parent     996abf053eec4d67136be8b911bbaaf989cfb99c
parent     973b7d83ebeb1e34b8bee69208916e5f0e2353c3
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (34 commits)
powerpc: Wireup new syscalls
Move update_mmu_cache() declaration from tlbflush.h to pgtable.h
powerpc/pseries: Remove kmalloc call in handling writes to lparcfg
powerpc/pseries: Update arch vector to indicate support for CMO
ibmvfc: Add support for collaborative memory overcommit
ibmvscsi: driver enablement for CMO
ibmveth: enable driver for CMO
ibmveth: Automatically enable larger rx buffer pools for larger mtu
powerpc/pseries: Verify CMO memory entitlement updates with virtual I/O
powerpc/pseries: vio bus support for CMO
powerpc/pseries: iommu enablement for CMO
powerpc/pseries: Add CMO paging statistics
powerpc/pseries: Add collaborative memory manager
powerpc/pseries: Utilities to set firmware page state
powerpc/pseries: Enable CMO feature during platform setup
powerpc/pseries: Split retrieval of processor entitlement data into a helper routine
powerpc/pseries: Add memory entitlement capabilities to /proc/ppc64/lparcfg
powerpc/pseries: Split processor entitlement retrieval and gathering to helper routines
powerpc/pseries: Remove extraneous error reporting for hcall failures in lparcfg
powerpc: Fix compile error with binutils 2.15
...
Fixed up conflict in arch/powerpc/platforms/52xx/Kconfig manually.
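
The common thread in the CMO (Cooperative Memory Overcommit) commits below is a new get_desired_dma hook on struct vio_driver: once the hypervisor can page out partition memory, each Virtual I/O driver must report how many bytes of I/O memory it needs mapped at once so the bus code can set its entitlement. A minimal sketch of the hook's shape, assuming the vio bus API these diffs build on — the example driver and its flat 1MB figure are illustrative, not taken from any commit in this merge:

	#include <linux/module.h>
	#include <asm/vio.h>

	/* illustrative: report a flat 1MB desired DMA entitlement */
	static unsigned long example_get_desired_dma(struct vio_dev *vdev)
	{
		return 1024 * 1024;
	}

	static struct vio_driver example_driver = {
		.get_desired_dma = example_get_desired_dma, /* new CMO hook */
		.driver = {
			.name  = "example",
			.owner = THIS_MODULE,
		},
	};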
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ibmveth.c            | 189
-rw-r--r--  drivers/net/ibmveth.h            |   5
-rw-r--r--  drivers/of/of_i2c.c              |   2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c   |  15
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c |  45
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h |   2
6 files changed, 208 insertions(+), 50 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 00527805e4f1..e5a6e2e84540 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -52,7 +53,9 @@
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
+#include <asm/iommu.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
 #include <linux/seq_file.h>
 
 #include "ibmveth.h"
@@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 static struct kobj_type ktype_veth_pool;
 
+
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "ibmveth"
 static struct proc_dir_entry *ibmveth_proc_dir;
@@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	u32 i;
 	u32 count = pool->size - atomic_read(&pool->available);
 	u32 buffers_added = 0;
+	struct sk_buff *skb;
+	unsigned int free_index, index;
+	u64 correlator;
+	unsigned long lpar_rc;
+	dma_addr_t dma_addr;
 
 	mb();
 
 	for(i = 0; i < count; ++i) {
-		struct sk_buff *skb;
-		unsigned int free_index, index;
-		u64 correlator;
 		union ibmveth_buf_desc desc;
-		unsigned long lpar_rc;
-		dma_addr_t dma_addr;
 
 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 
@@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
+		if (dma_mapping_error(dma_addr))
+			goto failure;
+
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 		pool->dma_addr[index] = dma_addr;
 		pool->skbuff[index] = skb;
@@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-		if(lpar_rc != H_SUCCESS) {
-			pool->free_map[free_index] = index;
-			pool->skbuff[index] = NULL;
-			if (pool->consumer_index == 0)
-				pool->consumer_index = pool->size - 1;
-			else
-				pool->consumer_index--;
-			dma_unmap_single(&adapter->vdev->dev,
-					pool->dma_addr[index], pool->buff_size,
-					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(skb);
-			adapter->replenish_add_buff_failure++;
-			break;
-		} else {
+		if (lpar_rc != H_SUCCESS)
+			goto failure;
+		else {
 			buffers_added++;
 			adapter->replenish_add_buff_success++;
 		}
@@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 	mb();
 	atomic_add(buffers_added, &(pool->available));
+	return;
+
+failure:
+	pool->free_map[free_index] = index;
+	pool->skbuff[index] = NULL;
+	if (pool->consumer_index == 0)
+		pool->consumer_index = pool->size - 1;
+	else
+		pool->consumer_index--;
+	if (!dma_mapping_error(dma_addr))
+		dma_unmap_single(&adapter->vdev->dev,
+				 pool->dma_addr[index], pool->buff_size,
+				 DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+	adapter->replenish_add_buff_failure++;
+
+	mb();
+	atomic_add(buffers_added, &(pool->available));
 }
 
 /* replenish routine */
@@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 
 	adapter->replenish_task_cycles++;
 
-	for(i = 0; i < IbmVethNumBufferPools; i++)
+	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
 		if(adapter->rx_buff_pool[i].active)
 			ibmveth_replenish_buffer_pool(adapter,
 						     &adapter->rx_buff_pool[i]);
@@ -472,6 +487,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
+
+	if (adapter->bounce_buffer != NULL) {
+		if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->bounce_buffer_dma,
+					adapter->netdev->mtu + IBMVETH_BUFF_OH,
+					DMA_BIDIRECTIONAL);
+			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+		}
+		kfree(adapter->bounce_buffer);
+		adapter->bounce_buffer = NULL;
+	}
 }
 
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -607,6 +634,24 @@ static int ibmveth_open(struct net_device *netdev)
 		return rc;
 	}
 
+	adapter->bounce_buffer =
+	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+	if (!adapter->bounce_buffer) {
+		ibmveth_error_printk("unable to allocate bounce buffer\n");
+		ibmveth_cleanup(adapter);
+		napi_disable(&adapter->napi);
+		return -ENOMEM;
+	}
+	adapter->bounce_buffer_dma =
+	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+		ibmveth_error_printk("unable to map bounce buffer\n");
+		ibmveth_cleanup(adapter);
+		napi_disable(&adapter->napi);
+		return -ENOMEM;
+	}
+
 	ibmveth_debug_printk("initial replenish cycle\n");
 	ibmveth_interrupt(netdev->irq, netdev);
 
@@ -853,10 +898,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tx_packets = 0;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
+	int used_bounce = 0;
+	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-					     skb->len, DMA_TO_DEVICE);
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -875,12 +922,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
-	if (dma_mapping_error(desc.fields.address)) {
-		ibmveth_error_printk("tx: unable to map xmit buffer\n");
+	if (dma_mapping_error(data_dma_addr)) {
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			ibmveth_error_printk("tx: unable to map xmit buffer\n");
+		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+					  skb->len);
+		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
-		tx_dropped++;
-		goto out;
-	}
+		used_bounce = 1;
+	} else
+		desc.fields.address = data_dma_addr;
 
 	/* send the frame. Arbitrarily set retrycount to 1024 */
 	correlator = 0;
@@ -904,8 +955,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netdev->trans_start = jiffies;
 	}
 
-	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
-			 skb->len, DMA_TO_DEVICE);
+	if (!used_bounce)
+		dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+				 skb->len, DMA_TO_DEVICE);
 
 out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	netdev->stats.tx_dropped += tx_dropped;
@@ -1053,9 +1105,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ibmveth_adapter *adapter = dev->priv;
+	struct vio_dev *viodev = adapter->vdev;
 	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-	int reinit = 0;
-	int i, rc;
+	int i;
 
 	if (new_mtu < IBMVETH_MAX_MTU)
 		return -EINVAL;
@@ -1067,23 +1119,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	if (i == IbmVethNumBufferPools)
 		return -EINVAL;
 
+	/* Deactivate all the buffer pools so that the next loop can activate
+	   only the buffer pools necessary to hold the new MTU */
+	for (i = 0; i < IbmVethNumBufferPools; i++)
+		if (adapter->rx_buff_pool[i].active) {
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+			adapter->rx_buff_pool[i].active = 0;
+		}
+
 	/* Look for an active buffer pool that can hold the new MTU */
 	for(i = 0; i<IbmVethNumBufferPools; i++) {
-		if (!adapter->rx_buff_pool[i].active) {
-			adapter->rx_buff_pool[i].active = 1;
-			reinit = 1;
-		}
+		adapter->rx_buff_pool[i].active = 1;
 
 		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-			if (reinit && netif_running(adapter->netdev)) {
+			if (netif_running(adapter->netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(adapter->netdev);
 				adapter->pool_config = 0;
 				dev->mtu = new_mtu;
-				if ((rc = ibmveth_open(adapter->netdev)))
-					return rc;
-			} else
-				dev->mtu = new_mtu;
+				vio_cmo_set_dev_desired(viodev,
+						ibmveth_get_desired_dma
+						(viodev));
+				return ibmveth_open(adapter->netdev);
+			}
+			dev->mtu = new_mtu;
+			vio_cmo_set_dev_desired(viodev,
+						ibmveth_get_desired_dma
+						(viodev));
 			return 0;
 		}
 	}
@@ -1098,6 +1161,46 @@ static void ibmveth_poll_controller(struct net_device *dev)
 }
 #endif
 
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *	Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+	struct ibmveth_adapter *adapter;
+	unsigned long ret;
+	int i;
+	int rxqentries = 1;
+
+	/* netdev inits at probe time along with the structures we need below*/
+	if (netdev == NULL)
+		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+	adapter = netdev_priv(netdev);
+
+	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+	for (i = 0; i < IbmVethNumBufferPools; i++) {
+		/* add the size of the active receive buffers */
+		if (adapter->rx_buff_pool[i].active)
+			ret +=
+			    adapter->rx_buff_pool[i].size *
+			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+					     buff_size);
+		rxqentries += adapter->rx_buff_pool[i].size;
+	}
+	/* add the size of the receive queue entries */
+	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+	return ret;
+}
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
@@ -1242,6 +1345,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 	ibmveth_proc_unregister_adapter(adapter);
 
 	free_netdev(netdev);
+	dev_set_drvdata(&dev->dev, NULL);
+
 	return 0;
 }
 
@@ -1402,14 +1507,15 @@ const char * buf, size_t count)
 			return -EPERM;
 		}
 
-		pool->active = 0;
 		if (netif_running(netdev)) {
 			adapter->pool_config = 1;
 			ibmveth_close(netdev);
+			pool->active = 0;
 			adapter->pool_config = 0;
 			if ((rc = ibmveth_open(netdev)))
 				return rc;
 		}
+		pool->active = 0;
 	}
 	} else if (attr == &veth_num_attr) {
 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
@@ -1485,6 +1591,7 @@ static struct vio_driver ibmveth_driver = {
 	.id_table	= ibmveth_device_table,
 	.probe		= ibmveth_probe,
 	.remove		= ibmveth_remove,
+	.get_desired_dma = ibmveth_get_desired_dma,
 	.driver	= {
 		.name	= ibmveth_driver_name,
 		.owner	= THIS_MODULE,
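
The transmit-path hunks above carry the heart of the ibmveth CMO enablement: once the hypervisor can page out partition memory, dma_map_single() failures become an expected event, so instead of dropping the frame the driver copies it into the bounce buffer that ibmveth_open() now allocates and maps up front. A condensed sketch of that fallback, assuming the single-argument dma_mapping_error() of this kernel generation — the helper function itself is illustrative, not part of the patch:

	/* illustrative helper: map skb data for TX, falling back to the
	 * bounce buffer preallocated at open() time; returns nonzero when
	 * the caller must skip the dma_unmap_single() after the send */
	static int example_map_or_bounce(struct ibmveth_adapter *adapter,
					 struct sk_buff *skb, dma_addr_t *addr)
	{
		dma_addr_t mapped = dma_map_single(&adapter->vdev->dev,
						   skb->data, skb->len,
						   DMA_TO_DEVICE);

		if (dma_mapping_error(mapped)) {
			skb_copy_from_linear_data(skb, adapter->bounce_buffer,
						  skb->len);
			*addr = adapter->bounce_buffer_dma;
			return 1;
		}
		*addr = mapped;
		return 0;
	}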
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 41f61cd18852..d28186948752 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address,
 	plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
 
 #define IbmVethNumBufferPools 5
+#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
 #define IBMVETH_MAX_MTU 68
 #define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_BUFF_LIST_SIZE 4096
+#define IBMVETH_FILT_LIST_SIZE 4096
 #define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
@@ -143,6 +146,8 @@ struct ibmveth_adapter {
     struct ibmveth_rx_q rx_queue;
     int pool_config;
     int rx_csum;
+    void *bounce_buffer;
+    dma_addr_t bounce_buffer_dma;
 
     /* adapter specific stats */
     u64 replenish_task_cycles;
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 5c015d310d4a..344e1b03dd8b 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -91,8 +91,6 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
 		}
 
 		info.irq = irq_of_parse_and_map(node, 0);
-		if (info.irq == NO_IRQ)
-			info.irq = -1;
 
 		if (of_find_i2c_driver(node, &info) < 0) {
 			irq_dispose_mapping(info.irq);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index eb702b96d57c..c4a7c06793c5 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3819,6 +3819,20 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 	return 0;
 }
 
+/**
+ * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
+ * @vdev:	vio device struct
+ *
+ * Return value:
+ *	Number of bytes the driver will need to DMA map at the same time in
+ *	order to perform well.
+ */
+static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
+{
+	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
+}
+
 static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
 	{"fcp", "IBM,vfc-client"},
 	{ "", "" }
@@ -3829,6 +3843,7 @@ static struct vio_driver ibmvfc_driver = {
 	.id_table = ibmvfc_device_table,
 	.probe = ibmvfc_probe,
 	.remove = ibmvfc_remove,
+	.get_desired_dma = ibmvfc_get_desired_dma,
 	.driver = {
 		.name = IBMVFC_NAME,
 		.owner = THIS_MODULE,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5d23368a1bce..20000ec79b04 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -72,6 +72,7 @@
 #include <linux/delay.h>
 #include <asm/firmware.h>
 #include <asm/vio.h>
+#include <asm/firmware.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -426,8 +427,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 					   SG_ALL * sizeof(struct srp_direct_buf),
 					   &evt_struct->ext_list_token, 0);
 		if (!evt_struct->ext_list) {
-			sdev_printk(KERN_ERR, cmd->device,
-				    "Can't allocate memory for indirect table\n");
+			if (!firmware_has_feature(FW_FEATURE_CMO))
+				sdev_printk(KERN_ERR, cmd->device,
+					    "Can't allocate memory "
+					    "for indirect table\n");
 			return 0;
 		}
 	}
@@ -743,7 +746,9 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	srp_cmd->lun = ((u64) lun) << 48;
 
 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
-		sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			sdev_printk(KERN_ERR, cmnd->device,
+				    "couldn't convert cmd to srp_cmd\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
@@ -855,7 +860,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 			       DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(req->buffer)) {
-		dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			dev_err(hostdata->dev,
+				"Unable to map request_buffer for "
+				"adapter_info!\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return;
 	}
@@ -1400,7 +1408,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 						    DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(host_config->buffer)) {
-		dev_err(hostdata->dev, "dma_mapping error getting host config\n");
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			dev_err(hostdata->dev,
+				"dma_mapping error getting host config\n");
 		free_event_struct(&hostdata->pool, evt_struct);
 		return -1;
 	}
@@ -1604,7 +1614,7 @@ static struct scsi_host_template driver_template = {
 	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
 	.slave_configure = ibmvscsi_slave_configure,
 	.change_queue_depth = ibmvscsi_change_queue_depth,
-	.cmd_per_lun = 16,
+	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
 	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
@@ -1613,6 +1623,26 @@ static struct scsi_host_template driver_template = {
 };
 
 /**
+ * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *	Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
+{
+	/* iu_storage data allocated in initialize_event_pool */
+	unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
+
+	/* add io space for sg data */
+	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT *
+	               IBMVSCSI_CMDS_PER_LUN_DEFAULT);
+
+	return desired_io;
+}
+
+/**
  * Called by bus code for each adapter
  */
 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -1641,7 +1671,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	hostdata->host = host;
 	hostdata->dev = dev;
 	atomic_set(&hostdata->request_limit, -1);
-	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
+	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
 	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
 	if (rc != 0 && rc != H_RESOURCE) {
@@ -1735,6 +1765,7 @@ static struct vio_driver ibmvscsi_driver = {
 	.id_table = ibmvscsi_device_table,
 	.probe = ibmvscsi_probe,
 	.remove = ibmvscsi_remove,
+	.get_desired_dma = ibmvscsi_get_desired_dma,
 	.driver = {
 		.name = "ibmvscsi",
 		.owner = THIS_MODULE,
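
For a sense of scale, the entitlement ibmvscsi_get_desired_dma() reports can be computed straight from the constants this patch adds, assuming max_requests keeps its IBMVSCSI_MAX_REQUESTS_DEFAULT value of 100 — the helper below is illustrative arithmetic, not part of the patch:

	/* illustrative: the two terms of the default ibmvscsi entitlement */
	static unsigned long example_default_entitlement(void)
	{
		/* IU pool mapped in initialize_event_pool(): 100 IUs */
		unsigned long io = IBMVSCSI_MAX_REQUESTS_DEFAULT *
				   sizeof(union viosrp_iu);

		/* sg term with the new defaults: 256 * 16 = 4096 */
		io += IBMVSCSI_MAX_SECTORS_DEFAULT *
		      IBMVSCSI_CMDS_PER_LUN_DEFAULT;

		return io;
	}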
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 46e850e302c7..2d4339d5e16e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,8 @@ struct Scsi_Host;
 #define MAX_INDIRECT_BUFS 10
 
 #define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
+#define IBMVSCSI_CMDS_PER_LUN_DEFAULT 16
+#define IBMVSCSI_MAX_SECTORS_DEFAULT 256 /* 32 * 8 = default max I/O 32 pages */
 #define IBMVSCSI_MAX_CMDS_PER_LUN 64
 
 /* ------------------------------------------------------------