author     Bart Van Assche <bart.vanassche@sandisk.com>    2017-01-20 16:04:36 -0500
committer  Doug Ledford <dledford@redhat.com>               2017-01-24 12:26:17 -0500
commit     99db9494035f5b9fbb1d579f89c6fa1beba6dbb7
tree       ae9fbb5059c019095aad90b8600823bc25344238
parent     92f4ae3559832ef1e5f785cfea14c231ef40612e
IB/core: Remove ib_device.dma_device
Add code in ib_register_device() for copying the DMA masks. Use
&ib_device.dev in DMA mapping operations instead of dma_device.
Remove ib_device.dma_device because, as a result of this and the
previous patches, it is no longer used.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
 drivers/infiniband/core/device.c | 17
 include/rdma/ib_verbs.h          | 31
 2 files changed, 22 insertions(+), 26 deletions(-)
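
For API consumers the change is transparent: the ib_dma_* wrappers keep
their signatures, and only their fallback path resolves the DMA device
differently. A minimal before/after sketch (ib_dev, buf and len are
illustrative placeholders, not identifiers from this patch):

	/* Before this patch: the fallback mapped through the extra pointer. */
	dma_addr_t addr = dma_map_single(ib_dev->dma_device, buf, len,
					 DMA_TO_DEVICE);

	/* After this patch: it maps through the embedded struct device,
	 * which ib_register_device() has populated with the parent's
	 * DMA settings. */
	dma_addr_t addr = dma_map_single(&ib_dev->dev, buf, len,
					 DMA_TO_DEVICE);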
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d543c4390447..cac1518de36e 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -333,14 +333,15 @@ int ib_register_device(struct ib_device *device,
 	int ret;
 	struct ib_client *client;
 	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+	struct device *parent = device->dev.parent;
 
-	WARN_ON_ONCE(!device->dev.parent && !device->dma_device);
-	WARN_ON_ONCE(device->dev.parent && device->dma_device
-		     && device->dev.parent != device->dma_device);
-	if (!device->dev.parent)
-		device->dev.parent = device->dma_device;
-	if (!device->dma_device)
-		device->dma_device = device->dev.parent;
+	WARN_ON_ONCE(!parent);
+	if (!device->dev.dma_ops)
+		device->dev.dma_ops = parent->dma_ops;
+	if (!device->dev.dma_mask)
+		device->dev.dma_mask = parent->dma_mask;
+	if (!device->dev.coherent_dma_mask)
+		device->dev.coherent_dma_mask = parent->coherent_dma_mask;
 
 	mutex_lock(&device_mutex);
 
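The hunk above replaces the old bidirectional pointer fixup with one-way
inheritance: every DMA field the HCA driver left unset on the IB device's
own struct device is copied from the parent (normally the underlying PCI
device). The same pattern in isolation, as a minimal sketch;
inherit_dma_settings() is an illustrative name, not a function in the
kernel tree:

static void inherit_dma_settings(struct device *dev)
{
	struct device *parent = dev->parent;

	/* Fall back to the parent only for fields the driver left
	 * unset, so explicit per-device settings always win. */
	if (!dev->dma_ops)
		dev->dma_ops = parent->dma_ops;
	if (!dev->dma_mask)
		dev->dma_mask = parent->dma_mask;
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = parent->coherent_dma_mask;
}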
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 694e39e4f1ff..a20a15f81936 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1841,8 +1841,6 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
-	struct device *dma_device;
-
 	char name[IB_DEVICE_NAME_MAX];
 
 	struct list_head event_handler_list;
@@ -2969,7 +2967,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->mapping_error(dev, dma_addr);
-	return dma_mapping_error(dev->dma_device, dma_addr);
+	return dma_mapping_error(&dev->dev, dma_addr);
 }
 
 /**
@@ -2985,7 +2983,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
-	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+	return dma_map_single(&dev->dev, cpu_addr, size, direction);
 }
 
 /**
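Taken together, the two wrappers above preserve the usual map-then-check
idiom; only the non-dma_ops branch changes. A usage sketch, assuming an
ibdev pointer and a kernel-virtual buffer buf of len bytes (all
placeholders):

	u64 dma_addr;

	dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr))
		return -ENOMEM;		/* nothing was mapped */
	/* ... post the buffer to the HCA, wait for completion ... */
	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);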
@@ -3002,7 +3000,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_single(dev, addr, size, direction);
 	else
-		dma_unmap_single(dev->dma_device, addr, size, direction);
+		dma_unmap_single(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3021,7 +3019,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
-	return dma_map_page(dev->dma_device, page, offset, size, direction);
+	return dma_map_page(&dev->dev, page, offset, size, direction);
 }
 
 /**
@@ -3038,7 +3036,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_page(dev, addr, size, direction);
 	else
-		dma_unmap_page(dev->dma_device, addr, size, direction);
+		dma_unmap_page(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3054,7 +3052,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
-	return dma_map_sg(dev->dma_device, sg, nents, direction);
+	return dma_map_sg(&dev->dev, sg, nents, direction);
 }
 
 /**
@@ -3071,7 +3069,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
 	else
-		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+		dma_unmap_sg(&dev->dev, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
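The scatter/gather wrappers follow the same dispatch rule. Note that
ib_dma_map_sg() returns the number of mapped entries and 0 on failure, so
the error check differs from the single-buffer case. A short sketch (sgl
and nents are placeholders):

	int mapped;

	mapped = ib_dma_map_sg(ibdev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;
	/* ... build and post a work request over the mapped entries ... */
	ib_dma_unmap_sg(ibdev, sgl, nents, DMA_BIDIRECTIONAL);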
@@ -3082,9 +3080,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 	if (dev->dma_ops)
 		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
 						  dma_attrs);
-	else
-		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-					dma_attrs);
+	return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3096,8 +3092,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
 						    dma_attrs);
 	else
-		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
-				   dma_attrs);
+		dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3142,7 +3137,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
 	else
-		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+		dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3160,7 +3155,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
 	else
-		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+		dma_sync_single_for_device(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3183,7 +3178,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 		*dma_handle = handle;
 		return ret;
 	}
-	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+	return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
 }
 
 /**
@@ -3200,7 +3195,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 	else
-		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+		dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
 }
 
 /**
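
Finally, the coherent allocation pair: when no legacy dma_ops are
installed, these now go straight to dma_alloc_coherent() and
dma_free_coherent() on &dev->dev. A usage sketch (size is a placeholder):

	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = ib_dma_alloc_coherent(ibdev, size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* ... CPU accesses go through cpu_addr; the HCA uses dma_handle ... */
	ib_dma_free_coherent(ibdev, size, cpu_addr, dma_handle);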