/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_gid_cache {
	int             table_len;
	union ib_gid    table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*gid = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);

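/*
 * Reverse lookup: scan every port's cached GID table for an exact match
 * and report the first port (and optionally the table index) that holds
 * it; -ENOENT if no port has the GID.  A sketch of a caller (the names
 * below are illustrative, not taken from this file):
 *
 *	u8  port;
 *	u16 idx;
 *
 *	if (!ib_find_cached_gid(device, &sgid, &port, &idx))
 *		pr_debug("GID found on port %u index %u\n", port, idx);
 */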
int ib_find_cached_gid(struct ib_device *device,
		       union ib_gid     *gid,
		       u8               *port_num,
		       u16              *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + rdma_start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

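/*
 * The P_Key comparison below masks off bit 15, the membership bit, so a
 * limited-membership P_Key matches its full-membership counterpart.  The
 * first index whose low 15 bits match is returned; callers that need to
 * distinguish full from limited membership must check bit 15 of the
 * cached value themselves.
 */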
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

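/*
 * Rebuild the cached tables for one port.  The new tables are allocated
 * and filled from ib_query_port()/ib_query_pkey()/ib_query_gid() without
 * the cache lock held; only the final pointer swap happens under the
 * write lock, so readers are never blocked behind the slow device queries.
 */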
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

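/*
 * Port events arrive in a context where sleeping is not allowed, so
 * ib_cache_event() only queues an ib_update_work item (allocated with
 * GFP_ATOMIC) on ib_wq; ib_cache_task() then performs the actual update
 * in process context and frees the work item.
 */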
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

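/*
 * Per-device setup: allocate the per-port pointer arrays, prime the cache
 * for every port, then register for the port events that can invalidate
 * it.  On failure the partially built cache is freed.
 */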
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + rdma_start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};

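/*
 * Registering cache_client at module init makes the core call
 * ib_cache_setup_one() for every existing and future IB device, and
 * ib_cache_cleanup_one() when a device or this module goes away.
 */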
int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}