Diffstat (limited to 'drivers/infiniband/core/cache.c')
-rw-r--r--  drivers/infiniband/core/cache.c | 69
1 file changed, 29 insertions(+), 40 deletions(-)
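
In short: the patch drops cache.c's file-local start_port()/end_port() helpers in favor of the shared rdma_start_port()/rdma_end_port() accessors, and constifies the gid argument of ib_find_cached_gid(); the cache lookup logic itself is untouched.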
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 80f6cf2449fb..871da832d016 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -58,17 +58,6 @@ struct ib_update_work {
 	u8 port_num;
 };
 
-static inline int start_port(struct ib_device *device)
-{
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-static inline int end_port(struct ib_device *device)
-{
-	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-		0 : device->phys_port_cnt;
-}
-
 int ib_get_cached_gid(struct ib_device *device,
 		      u8 port_num,
 		      int index,
@@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device,
 	unsigned long flags;
 	int ret = 0;
 
-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.gid_cache[port_num - start_port(device)];
+	cache = device->cache.gid_cache[port_num - rdma_start_port(device)];
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -96,10 +85,10 @@ int ib_get_cached_gid(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_gid);
 
 int ib_find_cached_gid(struct ib_device *device,
-		       union ib_gid *gid,
+		       const union ib_gid *gid,
 		       u8 *port_num,
 		       u16 *index)
 {
 	struct ib_gid_cache *cache;
 	unsigned long flags;
@@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		cache = device->cache.gid_cache[p];
 		for (i = 0; i < cache->table_len; ++i) {
 			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
-				*port_num = p + start_port(device);
+				*port_num = p + rdma_start_port(device);
 				if (index)
 					*index = i;
 				ret = 0;
@@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device,
 	unsigned long flags;
 	int ret = 0;
 
-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device,
 	int ret = -ENOENT;
 	int partial_ix = -1;
 
-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 
 	*index = -1;
 
@@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 	int i;
 	int ret = -ENOENT;
 
-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 
 	*index = -1;
 
@@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device,
 	unsigned long flags;
 	int ret = 0;
 
-	if (port_num < start_port(device) || port_num > end_port(device))
+	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
+	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
-	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];
+	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];
 
-	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
-	device->cache.gid_cache [port - start_port(device)] = gid_cache;
+	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;
 
-	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
+	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
 
 	write_unlock_irq(&device->cache.lock);
 
@@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device)
 
 	device->cache.pkey_cache =
 		kmalloc(sizeof *device->cache.pkey_cache *
-			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
+			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
 	device->cache.gid_cache =
 		kmalloc(sizeof *device->cache.gid_cache *
-			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
+			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
 
 	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-					  (end_port(device) -
-					   start_port(device) + 1),
+					  (rdma_end_port(device) -
+					   rdma_start_port(device) + 1),
 					  GFP_KERNEL);
 
 	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
@@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device)
 		goto err;
 	}
 
-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		device->cache.pkey_cache[p] = NULL;
 		device->cache.gid_cache [p] = NULL;
-		ib_cache_update(device, p + start_port(device));
+		ib_cache_update(device, p + rdma_start_port(device));
 	}
 
 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
@@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device)
 	return;
 
 err_cache:
-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
 		kfree(device->cache.gid_cache[p]);
 	}
@@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
 	ib_unregister_event_handler(&device->cache.event_handler);
 	flush_workqueue(ib_wq);
 
-	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
 		kfree(device->cache.gid_cache[p]);
 	}
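
Note: the start_port()/end_port() helpers deleted above are not lost; the patch switches to shared rdma_start_port()/rdma_end_port() accessors provided by the core verbs layer. As a sketch of what those shared helpers presumably look like, reconstructed from the local copies removed from cache.c (their exact home in include/rdma/ib_verbs.h, the const qualifier, and the u8 return type are assumptions here):

/*
 * Sketch only: assumed to live in include/rdma/ib_verbs.h. The bodies
 * mirror the local helpers removed from cache.c above. A switch exposes
 * a single management port numbered 0, while a CA/RNIC numbers its
 * physical ports 1..phys_port_cnt.
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

With these, every "for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)" loop in the diff walks the device's zero-based, port-indexed cache arrays regardless of node type, and "p + rdma_start_port(device)" converts an array index back to a wire port number.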