author     Bart Van Assche <bart.vanassche@sandisk.com>   2017-01-20 16:04:12 -0500
committer  Doug Ledford <dledford@redhat.com>             2017-01-24 12:23:35 -0500
commit     5f0cb80134a0035829bf7580126ea371c4aefec5
tree       7b4fdece25aea92a453428961bd7c57cf091b882
parent     e6d356d3cdfdacaff5d9d3e26de05c6068c03ca4
IB/qib: Remove DMA mapping code
The qib DMA mapping code is no longer built since commit eb636ac0e49e
("IB/qib: Remove dma.c and use rdmavt version of dma functions"). Hence
remove it.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Mike Marciniszyn <mike.marciniszyn@intel.com>
Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  drivers/infiniband/hw/qib/qib_dma.c  | 169
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c |   5
2 files changed, 1 insertion(+), 173 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
deleted file mode 100644
index 59fe092b4b0f..000000000000
--- a/drivers/infiniband/hw/qib/qib_dma.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- *  - Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- *  - Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-
-#include "qib_verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64) 0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-        return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
-                              size_t size, enum dma_data_direction direction)
-{
-        BUG_ON(!valid_dma_direction(direction));
-        return (u64) cpu_addr;
-}
-
-static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
-                                 enum dma_data_direction direction)
-{
-        BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
-                            unsigned long offset, size_t size,
-                            enum dma_data_direction direction)
-{
-        u64 addr;
-
-        BUG_ON(!valid_dma_direction(direction));
-
-        if (offset + size > PAGE_SIZE) {
-                addr = BAD_DMA_ADDRESS;
-                goto done;
-        }
-
-        addr = (u64) page_address(page);
-        if (addr)
-                addr += offset;
-        /* TODO: handle highmem pages */
-
-done:
-        return addr;
-}
-
-static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
-                               enum dma_data_direction direction)
-{
-        BUG_ON(!valid_dma_direction(direction));
-}
-
-static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-                      int nents, enum dma_data_direction direction)
-{
-        struct scatterlist *sg;
-        u64 addr;
-        int i;
-        int ret = nents;
-
-        BUG_ON(!valid_dma_direction(direction));
-
-        for_each_sg(sgl, sg, nents, i) {
-                addr = (u64) page_address(sg_page(sg));
-                /* TODO: handle highmem pages */
-                if (!addr) {
-                        ret = 0;
-                        break;
-                }
-                sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-                sg->dma_length = sg->length;
-#endif
-        }
-        return ret;
-}
-
-static void qib_unmap_sg(struct ib_device *dev,
-                         struct scatterlist *sg, int nents,
-                         enum dma_data_direction direction)
-{
-        BUG_ON(!valid_dma_direction(direction));
-}
-
-static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
-                                    size_t size, enum dma_data_direction dir)
-{
-}
-
-static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
-                                       size_t size,
-                                       enum dma_data_direction dir)
-{
-}
-
-static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
-                                    u64 *dma_handle, gfp_t flag)
-{
-        struct page *p;
-        void *addr = NULL;
-
-        p = alloc_pages(flag, get_order(size));
-        if (p)
-                addr = page_address(p);
-        if (dma_handle)
-                *dma_handle = (u64) addr;
-        return addr;
-}
-
-static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
-                                  void *cpu_addr, u64 dma_handle)
-{
-        free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops qib_dma_mapping_ops = {
-        .mapping_error = qib_mapping_error,
-        .map_single = qib_dma_map_single,
-        .unmap_single = qib_dma_unmap_single,
-        .map_page = qib_dma_map_page,
-        .unmap_page = qib_dma_unmap_page,
-        .map_sg = qib_map_sg,
-        .unmap_sg = qib_unmap_sg,
-        .sync_single_for_cpu = qib_sync_single_for_cpu,
-        .sync_single_for_device = qib_sync_single_for_device,
-        .alloc_coherent = qib_dma_alloc_coherent,
-        .free_coherent = qib_dma_free_coherent
-};
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 2c3c93572c17..8fdf79f8d4e4 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -158,10 +158,7 @@ int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
         unsigned n, m;
         size_t off;
 
-        /*
-         * We use RKEY == zero for kernel virtual addresses
-         * (see qib_get_dma_mr and qib_dma.c).
-         */
+        /* We use RKEY == zero for kernel virtual addresses */
         rcu_read_lock();
         if (rkey == 0) {
                 struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);