2 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
36 #include <linux/dma-mapping.h>
43 MODULE_LICENSE("Dual BSD/GPL");
44 MODULE_DESCRIPTION("kernel IB MAD API");
45 MODULE_AUTHOR("Hal Rosenstock");
46 MODULE_AUTHOR("Sean Hefty");
49 kmem_cache_t *ib_mad_cache;
51 static struct list_head ib_mad_port_list;
52 static u32 ib_mad_client_id = 0;
55 static spinlock_t ib_mad_port_list_lock;
58 /* Forward declarations */
59 static int method_in_use(struct ib_mad_mgmt_method_table **method,
60 struct ib_mad_reg_req *mad_reg_req);
61 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
62 static struct ib_mad_agent_private *find_mad_agent(
63 struct ib_mad_port_private *port_priv,
65 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
66 struct ib_mad_private *mad);
67 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
68 static void timeout_sends(void *data);
69 static void local_completions(void *data);
70 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
71 struct ib_mad_agent_private *agent_priv,
73 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
74 struct ib_mad_agent_private *agent_priv);
77 * Returns the ib_mad_port_private structure for a device/port, or NULL if none is found
78 * Assumes ib_mad_port_list_lock is being held
80 static inline struct ib_mad_port_private *
81 __ib_get_mad_port(struct ib_device *device, int port_num)
83 struct ib_mad_port_private *entry;
85 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
86 if (entry->device == device && entry->port_num == port_num)
93 * Wrapper function to return an ib_mad_port_private structure or NULL
96 static inline struct ib_mad_port_private *
97 ib_get_mad_port(struct ib_device *device, int port_num)
99 struct ib_mad_port_private *entry;
102 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
103 entry = __ib_get_mad_port(device, port_num);
104 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
109 static inline u8 convert_mgmt_class(u8 mgmt_class)
111 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
112 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
116 static int get_spl_qp_index(enum ib_qp_type qp_type)
129 static int vendor_class_index(u8 mgmt_class)
131 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
134 static int is_vendor_class(u8 mgmt_class)
136 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
137 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
142 static int is_vendor_oui(char *oui)
144 if (oui[0] || oui[1] || oui[2])
149 static int is_vendor_method_in_use(
150 struct ib_mad_mgmt_vendor_class *vendor_class,
151 struct ib_mad_reg_req *mad_reg_req)
153 struct ib_mad_mgmt_method_table *method;
156 for (i = 0; i < MAX_MGMT_OUI; i++) {
157 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
158 method = vendor_class->method_table[i];
160 if (method_in_use(&method, mad_reg_req))
171 * ib_register_mad_agent - Register to send/receive MADs
173 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
175 enum ib_qp_type qp_type,
176 struct ib_mad_reg_req *mad_reg_req,
178 ib_mad_send_handler send_handler,
179 ib_mad_recv_handler recv_handler,
182 struct ib_mad_port_private *port_priv;
183 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
184 struct ib_mad_agent_private *mad_agent_priv;
185 struct ib_mad_reg_req *reg_req = NULL;
186 struct ib_mad_mgmt_class_table *class;
187 struct ib_mad_mgmt_vendor_class_table *vendor;
188 struct ib_mad_mgmt_vendor_class *vendor_class;
189 struct ib_mad_mgmt_method_table *method;
192 u8 mgmt_class, vclass;
194 /* Validate parameters */
195 qpn = get_spl_qp_index(qp_type);
199 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
202 /* Validate MAD registration request if supplied */
204 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
208 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
210 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
211 * one in this range currently allowed
213 if (mad_reg_req->mgmt_class !=
214 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
216 } else if (mad_reg_req->mgmt_class == 0) {
218 * Class 0 is reserved in IBA and is used for
219 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
222 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
224 * If class is in "new" vendor range,
225 * ensure supplied OUI is not zero
227 if (!is_vendor_oui(mad_reg_req->oui))
230 /* Make sure class supplied is consistent with QP type */
231 if (qp_type == IB_QPT_SMI) {
232 if ((mad_reg_req->mgmt_class !=
233 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
234 (mad_reg_req->mgmt_class !=
235 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
238 if ((mad_reg_req->mgmt_class ==
239 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
240 (mad_reg_req->mgmt_class ==
241 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
245 /* No registration request supplied */
250 /* Validate device and port */
251 port_priv = ib_get_mad_port(device, port_num);
253 ret = ERR_PTR(-ENODEV);
257 /* Allocate structures */
258 mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
259 if (!mad_agent_priv) {
260 ret = ERR_PTR(-ENOMEM);
263 memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
265 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
266 IB_ACCESS_LOCAL_WRITE);
267 if (IS_ERR(mad_agent_priv->agent.mr)) {
268 ret = ERR_PTR(-ENOMEM);
273 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
275 ret = ERR_PTR(-ENOMEM);
278 /* Make a copy of the MAD registration request */
279 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
282 /* Now, fill in the various structures */
283 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
284 mad_agent_priv->reg_req = reg_req;
285 mad_agent_priv->agent.rmpp_version = rmpp_version;
286 mad_agent_priv->agent.device = device;
287 mad_agent_priv->agent.recv_handler = recv_handler;
288 mad_agent_priv->agent.send_handler = send_handler;
289 mad_agent_priv->agent.context = context;
290 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
291 mad_agent_priv->agent.port_num = port_num;
293 spin_lock_irqsave(&port_priv->reg_lock, flags);
294 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
297 * Make sure MAD registration (if supplied)
298 * does not overlap with any existing registrations
301 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
302 if (!is_vendor_class(mgmt_class)) {
303 class = port_priv->version[mad_reg_req->
304 mgmt_class_version].class;
306 method = class->method_table[mgmt_class];
308 if (method_in_use(&method,
313 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
316 /* "New" vendor class range */
317 vendor = port_priv->version[mad_reg_req->
318 mgmt_class_version].vendor;
320 vclass = vendor_class_index(mgmt_class);
321 vendor_class = vendor->vendor_class[vclass];
323 if (is_vendor_method_in_use(
329 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
337 /* Add mad agent into port's agent list */
338 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
339 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
341 spin_lock_init(&mad_agent_priv->lock);
342 INIT_LIST_HEAD(&mad_agent_priv->send_list);
343 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
344 INIT_LIST_HEAD(&mad_agent_priv->done_list);
345 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
346 INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
347 INIT_LIST_HEAD(&mad_agent_priv->local_list);
348 INIT_WORK(&mad_agent_priv->local_work, local_completions,
350 atomic_set(&mad_agent_priv->refcount, 1);
351 init_waitqueue_head(&mad_agent_priv->wait);
353 return &mad_agent_priv->agent;
356 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
/* Deregister the MR before freeing the agent structure that owns it */
359 ib_dereg_mr(mad_agent_priv->agent.mr);
361 kfree(mad_agent_priv);
365 EXPORT_SYMBOL(ib_register_mad_agent);
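/*
 * Illustrative sketch (not part of this file): registering a MAD agent on
 * the GSI QP for Subnet Administration class MADs, given a struct ib_device
 * *device and a port number.  The handler names and the class/version/method
 * values are examples only; the ib_* calls and the ib_mad_reg_req fields are
 * the API implemented in this file.
 *
 *	static void my_send_done(struct ib_mad_agent *agent,
 *				 struct ib_mad_send_wc *send_wc)
 *	{
 *		ib_free_send_mad(send_wc->send_buf);
 *	}
 *
 *	static void my_recv(struct ib_mad_agent *agent,
 *			    struct ib_mad_recv_wc *recv_wc)
 *	{
 *		ib_free_recv_mad(recv_wc);
 *	}
 *
 *	struct ib_mad_reg_req reg_req;
 *	struct ib_mad_agent *agent;
 *
 *	memset(&reg_req, 0, sizeof reg_req);
 *	reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
 *	reg_req.mgmt_class_version = 2;
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
 *				      0, my_send_done, my_recv, NULL);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */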
367 static inline int is_snooping_sends(int mad_snoop_flags)
369 return (mad_snoop_flags &
370 (/*IB_MAD_SNOOP_POSTED_SENDS |
371 IB_MAD_SNOOP_RMPP_SENDS |*/
372 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
373 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
376 static inline int is_snooping_recvs(int mad_snoop_flags)
378 return (mad_snoop_flags &
379 (IB_MAD_SNOOP_RECVS /*|
380 IB_MAD_SNOOP_RMPP_RECVS*/));
383 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
384 struct ib_mad_snoop_private *mad_snoop_priv)
386 struct ib_mad_snoop_private **new_snoop_table;
390 spin_lock_irqsave(&qp_info->snoop_lock, flags);
391 /* Check for empty slot in array. */
392 for (i = 0; i < qp_info->snoop_table_size; i++)
393 if (!qp_info->snoop_table[i])
396 if (i == qp_info->snoop_table_size) {
398 new_snoop_table = kmalloc(sizeof mad_snoop_priv *
399 (qp_info->snoop_table_size + 1),
401 if (!new_snoop_table) {
405 if (qp_info->snoop_table) {
406 memcpy(new_snoop_table, qp_info->snoop_table,
407 sizeof mad_snoop_priv *
408 qp_info->snoop_table_size);
409 kfree(qp_info->snoop_table);
411 qp_info->snoop_table = new_snoop_table;
412 qp_info->snoop_table_size++;
414 qp_info->snoop_table[i] = mad_snoop_priv;
415 atomic_inc(&qp_info->snoop_count);
417 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
421 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
423 enum ib_qp_type qp_type,
425 ib_mad_snoop_handler snoop_handler,
426 ib_mad_recv_handler recv_handler,
429 struct ib_mad_port_private *port_priv;
430 struct ib_mad_agent *ret;
431 struct ib_mad_snoop_private *mad_snoop_priv;
434 /* Validate parameters */
435 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
436 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
437 ret = ERR_PTR(-EINVAL);
440 qpn = get_spl_qp_index(qp_type);
442 ret = ERR_PTR(-EINVAL);
445 port_priv = ib_get_mad_port(device, port_num);
447 ret = ERR_PTR(-ENODEV);
450 /* Allocate structures */
451 mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
452 if (!mad_snoop_priv) {
453 ret = ERR_PTR(-ENOMEM);
457 /* Now, fill in the various structures */
458 memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
459 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
460 mad_snoop_priv->agent.device = device;
461 mad_snoop_priv->agent.recv_handler = recv_handler;
462 mad_snoop_priv->agent.snoop_handler = snoop_handler;
463 mad_snoop_priv->agent.context = context;
464 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
465 mad_snoop_priv->agent.port_num = port_num;
466 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
467 init_waitqueue_head(&mad_snoop_priv->wait);
468 mad_snoop_priv->snoop_index = register_snoop_agent(
469 &port_priv->qp_info[qpn],
471 if (mad_snoop_priv->snoop_index < 0) {
472 ret = ERR_PTR(mad_snoop_priv->snoop_index);
476 atomic_set(&mad_snoop_priv->refcount, 1);
477 return &mad_snoop_priv->agent;
480 kfree(mad_snoop_priv);
484 EXPORT_SYMBOL(ib_register_mad_snoop);
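/*
 * Illustrative sketch (assumption, not part of this file): a snoop client
 * observes traffic on a MAD QP without owning a registration.  The handler
 * names and 'agent' are hypothetical; the flags and the call are the API
 * implemented above.
 *
 *	agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_RECVS |
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS,
 *				      my_snoop, my_recv, NULL);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */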
486 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
488 struct ib_mad_port_private *port_priv;
491 /* Note that we could still be handling received MADs */
494 * Canceling all sends results in dropping received response
495 * MADs, preventing us from queuing additional work
497 cancel_mads(mad_agent_priv);
498 port_priv = mad_agent_priv->qp_info->port_priv;
499 cancel_delayed_work(&mad_agent_priv->timed_work);
501 spin_lock_irqsave(&port_priv->reg_lock, flags);
502 remove_mad_reg_req(mad_agent_priv);
503 list_del(&mad_agent_priv->agent_list);
504 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
506 flush_workqueue(port_priv->wq);
507 ib_cancel_rmpp_recvs(mad_agent_priv);
509 atomic_dec(&mad_agent_priv->refcount);
510 wait_event(mad_agent_priv->wait,
511 !atomic_read(&mad_agent_priv->refcount));
513 if (mad_agent_priv->reg_req)
514 kfree(mad_agent_priv->reg_req);
515 ib_dereg_mr(mad_agent_priv->agent.mr);
516 kfree(mad_agent_priv);
519 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
521 struct ib_mad_qp_info *qp_info;
524 qp_info = mad_snoop_priv->qp_info;
525 spin_lock_irqsave(&qp_info->snoop_lock, flags);
526 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
527 atomic_dec(&qp_info->snoop_count);
528 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
530 atomic_dec(&mad_snoop_priv->refcount);
531 wait_event(mad_snoop_priv->wait,
532 !atomic_read(&mad_snoop_priv->refcount));
534 kfree(mad_snoop_priv);
538 * ib_unregister_mad_agent - Unregisters a client from using MAD services
540 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
542 struct ib_mad_agent_private *mad_agent_priv;
543 struct ib_mad_snoop_private *mad_snoop_priv;
545 /* If the TID is zero, the agent can only snoop. */
546 if (mad_agent->hi_tid) {
547 mad_agent_priv = container_of(mad_agent,
548 struct ib_mad_agent_private,
550 unregister_mad_agent(mad_agent_priv);
552 mad_snoop_priv = container_of(mad_agent,
553 struct ib_mad_snoop_private,
555 unregister_mad_snoop(mad_snoop_priv);
559 EXPORT_SYMBOL(ib_unregister_mad_agent);
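/*
 * Illustrative note: teardown is the same call for both kinds of agent.
 * unregister_mad_agent() above first cancels anything still on the send,
 * wait and local lists (those sends complete to the client with
 * IB_WC_WR_FLUSH_ERR) and then waits for the reference count to drop.
 * Sketch, with a hypothetical 'agent' pointer:
 *
 *	ib_unregister_mad_agent(agent);
 */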
561 static inline int response_mad(struct ib_mad *mad)
563 /* Trap represses are responses even though the response bit is not set */
564 return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
565 (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
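/*
 * Worked example (illustrative): IB_MGMT_METHOD_GET_RESP (0x81) has the
 * IB_MGMT_METHOD_RESP bit (0x80) set, so it matches the second test above,
 * while IB_MGMT_METHOD_TRAP_REPRESS (0x07) does not carry that bit and is
 * caught only by the explicit check.
 */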
568 static void dequeue_mad(struct ib_mad_list_head *mad_list)
570 struct ib_mad_queue *mad_queue;
573 BUG_ON(!mad_list->mad_queue);
574 mad_queue = mad_list->mad_queue;
575 spin_lock_irqsave(&mad_queue->lock, flags);
576 list_del(&mad_list->list);
578 spin_unlock_irqrestore(&mad_queue->lock, flags);
581 static void snoop_send(struct ib_mad_qp_info *qp_info,
582 struct ib_mad_send_buf *send_buf,
583 struct ib_mad_send_wc *mad_send_wc,
586 struct ib_mad_snoop_private *mad_snoop_priv;
590 spin_lock_irqsave(&qp_info->snoop_lock, flags);
591 for (i = 0; i < qp_info->snoop_table_size; i++) {
592 mad_snoop_priv = qp_info->snoop_table[i];
593 if (!mad_snoop_priv ||
594 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
597 atomic_inc(&mad_snoop_priv->refcount);
598 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
599 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
600 send_buf, mad_send_wc);
601 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
602 wake_up(&mad_snoop_priv->wait);
603 spin_lock_irqsave(&qp_info->snoop_lock, flags);
605 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
608 static void snoop_recv(struct ib_mad_qp_info *qp_info,
609 struct ib_mad_recv_wc *mad_recv_wc,
612 struct ib_mad_snoop_private *mad_snoop_priv;
616 spin_lock_irqsave(&qp_info->snoop_lock, flags);
617 for (i = 0; i < qp_info->snoop_table_size; i++) {
618 mad_snoop_priv = qp_info->snoop_table[i];
619 if (!mad_snoop_priv ||
620 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
623 atomic_inc(&mad_snoop_priv->refcount);
624 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
625 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
627 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
628 wake_up(&mad_snoop_priv->wait);
629 spin_lock_irqsave(&qp_info->snoop_lock, flags);
631 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
634 static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
637 memset(wc, 0, sizeof *wc);
639 wc->status = IB_WC_SUCCESS;
640 wc->opcode = IB_WC_RECV;
641 wc->pkey_index = pkey_index;
642 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
647 wc->dlid_path_bits = 0;
648 wc->port_num = port_num;
652 * Return 0 if SMP is to be sent
653 * Return 1 if SMP was consumed locally (whether or not solicited)
654 * Return < 0 if error
656 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
657 struct ib_mad_send_wr_private *mad_send_wr)
660 struct ib_smp *smp = mad_send_wr->send_buf.mad;
662 struct ib_mad_local_private *local;
663 struct ib_mad_private *mad_priv;
664 struct ib_mad_port_private *port_priv;
665 struct ib_mad_agent_private *recv_mad_agent = NULL;
666 struct ib_device *device = mad_agent_priv->agent.device;
667 u8 port_num = mad_agent_priv->agent.port_num;
669 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
671 if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
673 printk(KERN_ERR PFX "Invalid directed route\n");
676 /* Check to post send on QP or process locally */
677 ret = smi_check_local_dr_smp(smp, device, port_num);
678 if (!ret || !device->process_mad)
681 local = kmalloc(sizeof *local, GFP_ATOMIC);
684 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
687 local->mad_priv = NULL;
688 local->recv_mad_agent = NULL;
689 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
692 printk(KERN_ERR PFX "No memory for local response MAD\n");
697 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
698 send_wr->wr.ud.pkey_index,
699 send_wr->wr.ud.port_num, &mad_wc);
701 /* No GRH for DR SMP */
702 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
703 (struct ib_mad *)smp,
704 (struct ib_mad *)&mad_priv->mad);
707 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
708 if (response_mad(&mad_priv->mad.mad) &&
709 mad_agent_priv->agent.recv_handler) {
710 local->mad_priv = mad_priv;
711 local->recv_mad_agent = mad_agent_priv;
713 * Reference MAD agent until receive
714 * side of local completion handled
716 atomic_inc(&mad_agent_priv->refcount);
718 kmem_cache_free(ib_mad_cache, mad_priv);
720 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
721 kmem_cache_free(ib_mad_cache, mad_priv);
723 case IB_MAD_RESULT_SUCCESS:
724 /* Treat like an incoming receive MAD */
725 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
726 mad_agent_priv->agent.port_num);
728 mad_priv->mad.mad.mad_hdr.tid =
729 ((struct ib_mad *)smp)->mad_hdr.tid;
730 recv_mad_agent = find_mad_agent(port_priv,
733 if (!port_priv || !recv_mad_agent) {
734 kmem_cache_free(ib_mad_cache, mad_priv);
739 local->mad_priv = mad_priv;
740 local->recv_mad_agent = recv_mad_agent;
743 kmem_cache_free(ib_mad_cache, mad_priv);
749 local->mad_send_wr = mad_send_wr;
750 /* Reference MAD agent until send side of local completion handled */
751 atomic_inc(&mad_agent_priv->refcount);
752 /* Queue local completion to local list */
753 spin_lock_irqsave(&mad_agent_priv->lock, flags);
754 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
755 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
756 queue_work(mad_agent_priv->qp_info->port_priv->wq,
757 &mad_agent_priv->local_work);
763 static int get_buf_length(int hdr_len, int data_len)
767 seg_size = sizeof(struct ib_mad) - hdr_len;
768 if (data_len && seg_size) {
769 pad = seg_size - data_len % seg_size;
774 return hdr_len + data_len + pad;
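/*
 * Worked example (illustrative): for hdr_len = 24 and data_len = 200,
 * seg_size = sizeof(struct ib_mad) - 24 = 232 and
 * pad = 232 - (200 % 232) = 32, so the buffer length is
 * 24 + 200 + 32 = 256, i.e. exactly one 256-byte MAD.  When data_len is a
 * multiple of seg_size the pad collapses to zero.
 */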
777 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
778 u32 remote_qpn, u16 pkey_index,
780 int hdr_len, int data_len,
783 struct ib_mad_agent_private *mad_agent_priv;
784 struct ib_mad_send_wr_private *mad_send_wr;
788 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
790 buf_size = get_buf_length(hdr_len, data_len);
792 if ((!mad_agent->rmpp_version &&
793 (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
794 (!rmpp_active && buf_size > sizeof(struct ib_mad)))
795 return ERR_PTR(-EINVAL);
797 buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
799 return ERR_PTR(-ENOMEM);
800 memset(buf, 0, sizeof *mad_send_wr + buf_size);
802 mad_send_wr = buf + buf_size;
803 mad_send_wr->send_buf.mad = buf;
805 mad_send_wr->mad_agent_priv = mad_agent_priv;
806 mad_send_wr->sg_list[0].length = buf_size;
807 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
809 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
810 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
811 mad_send_wr->send_wr.num_sge = 1;
812 mad_send_wr->send_wr.opcode = IB_WR_SEND;
813 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
814 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
815 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
816 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
819 struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
820 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
821 IB_MGMT_RMPP_HDR + data_len);
822 rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
823 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
824 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
825 IB_MGMT_RMPP_FLAG_ACTIVE);
828 mad_send_wr->send_buf.mad_agent = mad_agent;
829 atomic_inc(&mad_agent_priv->refcount);
830 return &mad_send_wr->send_buf;
832 EXPORT_SYMBOL(ib_create_send_mad);
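/*
 * Illustrative sketch (assumption, not part of this file): allocating a
 * single-segment, non-RMPP send buffer destined for QP1 and filling in part
 * of the MAD header.  The 'agent' and 'ah' pointers are hypothetical, the
 * IB_MGMT_MAD_HDR/IB_MGMT_MAD_DATA sizes are assumed to come from ib_mad.h,
 * and the remaining header fields (base_version, class, attribute, TID, ...)
 * would be filled in the same way.
 *
 *	struct ib_mad_send_buf *msg;
 *	struct ib_mad_hdr *hdr;
 *
 *	msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_MAD_HDR,
 *				 IB_MGMT_MAD_DATA, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	hdr = msg->mad;
 *	hdr->method = IB_MGMT_METHOD_GET;
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 2;
 */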
834 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
836 struct ib_mad_agent_private *mad_agent_priv;
838 mad_agent_priv = container_of(send_buf->mad_agent,
839 struct ib_mad_agent_private, agent);
840 kfree(send_buf->mad);
842 if (atomic_dec_and_test(&mad_agent_priv->refcount))
843 wake_up(&mad_agent_priv->wait);
845 EXPORT_SYMBOL(ib_free_send_mad);
847 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
849 struct ib_mad_qp_info *qp_info;
850 struct list_head *list;
851 struct ib_send_wr *bad_send_wr;
852 struct ib_mad_agent *mad_agent;
857 /* Set WR ID to find mad_send_wr upon completion */
858 qp_info = mad_send_wr->mad_agent_priv->qp_info;
859 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
860 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
862 mad_agent = mad_send_wr->send_buf.mad_agent;
863 sge = mad_send_wr->sg_list;
864 sge->addr = dma_map_single(mad_agent->device->dma_device,
865 mad_send_wr->send_buf.mad, sge->length,
867 pci_unmap_addr_set(mad_send_wr, mapping, sge->addr);
869 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
870 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
871 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
873 list = &qp_info->send_queue.list;
876 list = &qp_info->overflow_list;
880 qp_info->send_queue.count++;
881 list_add_tail(&mad_send_wr->mad_list.list, list);
883 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
885 dma_unmap_single(mad_agent->device->dma_device,
886 pci_unmap_addr(mad_send_wr, mapping),
887 sge->length, DMA_TO_DEVICE);
893 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
894 * with the registered client
896 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
897 struct ib_mad_send_buf **bad_send_buf)
899 struct ib_mad_agent_private *mad_agent_priv;
900 struct ib_mad_send_buf *next_send_buf;
901 struct ib_mad_send_wr_private *mad_send_wr;
905 /* Walk list of send WRs and post each on send list */
906 for (; send_buf; send_buf = next_send_buf) {
908 mad_send_wr = container_of(send_buf,
909 struct ib_mad_send_wr_private,
911 mad_agent_priv = mad_send_wr->mad_agent_priv;
913 if (!send_buf->mad_agent->send_handler ||
914 (send_buf->timeout_ms &&
915 !send_buf->mad_agent->recv_handler)) {
921 * Save pointer to next work request to post in case the
922 * current one completes, and the user modifies the work
923 * request associated with the completion
925 next_send_buf = send_buf->next;
926 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
928 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
929 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
930 ret = handle_outgoing_dr_smp(mad_agent_priv,
932 if (ret < 0) /* error */
934 else if (ret == 1) /* locally consumed */
938 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
939 /* Timeout will be updated after send completes */
940 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
941 mad_send_wr->retries = send_buf->retries;
942 /* One reference for the send posted to the QP, plus one if a response is expected */
943 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
944 mad_send_wr->status = IB_WC_SUCCESS;
946 /* Reference MAD agent until send completes */
947 atomic_inc(&mad_agent_priv->refcount);
948 spin_lock_irqsave(&mad_agent_priv->lock, flags);
949 list_add_tail(&mad_send_wr->agent_list,
950 &mad_agent_priv->send_list);
951 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
953 if (mad_agent_priv->agent.rmpp_version) {
954 ret = ib_send_rmpp_mad(mad_send_wr);
955 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
956 ret = ib_send_mad(mad_send_wr);
958 ret = ib_send_mad(mad_send_wr);
960 /* Fail send request */
961 spin_lock_irqsave(&mad_agent_priv->lock, flags);
962 list_del(&mad_send_wr->agent_list);
963 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
964 atomic_dec(&mad_agent_priv->refcount);
971 *bad_send_buf = send_buf;
974 EXPORT_SYMBOL(ib_post_send_mad);
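/*
 * Illustrative sketch (assumption): posting the buffer built above.  On
 * failure, *bad_send_buf points at the first buffer in the chain that was
 * not accepted, and that buffer and everything after it still belong to the
 * caller.  For a single, unchained buffer the cleanup is simply:
 *
 *	struct ib_mad_send_buf *bad;
 *	int ret;
 *
 *	ret = ib_post_send_mad(msg, &bad);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */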
977 * ib_free_recv_mad - Returns data buffers used to receive
978 * a MAD to the access layer
980 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
982 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
983 struct ib_mad_private_header *mad_priv_hdr;
984 struct ib_mad_private *priv;
985 struct list_head free_list;
987 INIT_LIST_HEAD(&free_list);
988 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
990 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
992 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
994 mad_priv_hdr = container_of(mad_recv_wc,
995 struct ib_mad_private_header,
997 priv = container_of(mad_priv_hdr, struct ib_mad_private,
999 kmem_cache_free(ib_mad_cache, priv);
1002 EXPORT_SYMBOL(ib_free_recv_mad);
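/*
 * Illustrative note: every receive handed to a client's recv_handler,
 * including all segments chained on rmpp_list for a reassembled RMPP
 * transfer, remains owned by the client until it calls ib_free_recv_mad(),
 * typically at the end of the handler (hypothetical handler and process()
 * placeholder):
 *
 *	static void my_recv(struct ib_mad_agent *agent,
 *			    struct ib_mad_recv_wc *recv_wc)
 *	{
 *		process(recv_wc->recv_buf.mad);
 *		ib_free_recv_mad(recv_wc);
 *	}
 */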
1004 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1006 ib_mad_send_handler send_handler,
1007 ib_mad_recv_handler recv_handler,
1010 return ERR_PTR(-EINVAL); /* XXX: for now */
1012 EXPORT_SYMBOL(ib_redirect_mad_qp);
1014 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1017 printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1020 EXPORT_SYMBOL(ib_process_mad_wc);
1022 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1023 struct ib_mad_reg_req *mad_reg_req)
1027 for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
1028 i < IB_MGMT_MAX_METHODS;
1029 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1031 if ((*method)->agent[i]) {
1032 printk(KERN_ERR PFX "Method %d already in use\n", i);
1039 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1041 /* Allocate management method table */
1042 *method = kmalloc(sizeof **method, GFP_ATOMIC);
1044 printk(KERN_ERR PFX "No memory for "
1045 "ib_mad_mgmt_method_table\n");
1048 /* Clear management method table */
1049 memset(*method, 0, sizeof **method);
1055 * Check to see if there are any methods still in use
1057 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1061 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1062 if (method->agent[i])
1068 * Check to see if there are any method tables for this class still in use
1070 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1074 for (i = 0; i < MAX_MGMT_CLASS; i++)
1075 if (class->method_table[i])
1080 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1084 for (i = 0; i < MAX_MGMT_OUI; i++)
1085 if (vendor_class->method_table[i])
1090 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1095 for (i = 0; i < MAX_MGMT_OUI; i++)
1096 /* Is there a matching OUI for this vendor class? */
1097 if (!memcmp(vendor_class->oui[i], oui, 3))
1103 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1107 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1108 if (vendor->vendor_class[i])
1114 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1115 struct ib_mad_agent_private *agent)
1119 /* Remove any methods for this mad agent */
1120 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1121 if (method->agent[i] == agent) {
1122 method->agent[i] = NULL;
1127 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1128 struct ib_mad_agent_private *agent_priv,
1131 struct ib_mad_port_private *port_priv;
1132 struct ib_mad_mgmt_class_table **class;
1133 struct ib_mad_mgmt_method_table **method;
1136 port_priv = agent_priv->qp_info->port_priv;
1137 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1139 /* Allocate management class table for "new" class version */
1140 *class = kmalloc(sizeof **class, GFP_ATOMIC);
1142 printk(KERN_ERR PFX "No memory for "
1143 "ib_mad_mgmt_class_table\n");
1147 /* Clear management class table */
1148 memset(*class, 0, sizeof(**class));
1149 /* Allocate method table for this management class */
1150 method = &(*class)->method_table[mgmt_class];
1151 if ((ret = allocate_method_table(method)))
1154 method = &(*class)->method_table[mgmt_class];
1156 /* Allocate method table for this management class */
1157 if ((ret = allocate_method_table(method)))
1162 /* Now, make sure methods are not already in use */
1163 if (method_in_use(method, mad_reg_req))
1166 /* Finally, add in methods being registered */
1167 for (i = find_first_bit(mad_reg_req->method_mask,
1168 IB_MGMT_MAX_METHODS);
1169 i < IB_MGMT_MAX_METHODS;
1170 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1172 (*method)->agent[i] = agent_priv;
1177 /* Remove any methods for this mad agent */
1178 remove_methods_mad_agent(*method, agent_priv);
1179 /* Now, check to see if there are any methods in use */
1180 if (!check_method_table(*method)) {
1181 /* If not, release management method table */
1194 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1195 struct ib_mad_agent_private *agent_priv)
1197 struct ib_mad_port_private *port_priv;
1198 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1199 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1200 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1201 struct ib_mad_mgmt_method_table **method;
1202 int i, ret = -ENOMEM;
1205 /* "New" vendor (with OUI) class */
1206 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1207 port_priv = agent_priv->qp_info->port_priv;
1208 vendor_table = &port_priv->version[
1209 mad_reg_req->mgmt_class_version].vendor;
1210 if (!*vendor_table) {
1211 /* Allocate mgmt vendor class table for "new" class version */
1212 vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
1214 printk(KERN_ERR PFX "No memory for "
1215 "ib_mad_mgmt_vendor_class_table\n");
1218 /* Clear management vendor class table */
1219 memset(vendor, 0, sizeof(*vendor));
1220 *vendor_table = vendor;
1222 if (!(*vendor_table)->vendor_class[vclass]) {
1223 /* Allocate table for this management vendor class */
1224 vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
1225 if (!vendor_class) {
1226 printk(KERN_ERR PFX "No memory for "
1227 "ib_mad_mgmt_vendor_class\n");
1230 memset(vendor_class, 0, sizeof(*vendor_class));
1231 (*vendor_table)->vendor_class[vclass] = vendor_class;
1233 for (i = 0; i < MAX_MGMT_OUI; i++) {
1234 /* Is there a matching OUI for this vendor class? */
1235 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1236 mad_reg_req->oui, 3)) {
1237 method = &(*vendor_table)->vendor_class[
1238 vclass]->method_table[i];
1243 for (i = 0; i < MAX_MGMT_OUI; i++) {
1244 /* OUI slot available ? */
1245 if (!is_vendor_oui((*vendor_table)->vendor_class[
1247 method = &(*vendor_table)->vendor_class[
1248 vclass]->method_table[i];
1250 /* Allocate method table for this OUI */
1251 if ((ret = allocate_method_table(method)))
1253 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1254 mad_reg_req->oui, 3);
1258 printk(KERN_ERR PFX "All OUI slots in use\n");
1262 /* Now, make sure methods are not already in use */
1263 if (method_in_use(method, mad_reg_req))
1266 /* Finally, add in methods being registered */
1267 for (i = find_first_bit(mad_reg_req->method_mask,
1268 IB_MGMT_MAX_METHODS);
1269 i < IB_MGMT_MAX_METHODS;
1270 i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
1272 (*method)->agent[i] = agent_priv;
1277 /* Remove any methods for this mad agent */
1278 remove_methods_mad_agent(*method, agent_priv);
1279 /* Now, check to see if there are any methods in use */
1280 if (!check_method_table(*method)) {
1281 /* If not, release management method table */
1288 (*vendor_table)->vendor_class[vclass] = NULL;
1289 kfree(vendor_class);
1293 *vendor_table = NULL;
1300 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1302 struct ib_mad_port_private *port_priv;
1303 struct ib_mad_mgmt_class_table *class;
1304 struct ib_mad_mgmt_method_table *method;
1305 struct ib_mad_mgmt_vendor_class_table *vendor;
1306 struct ib_mad_mgmt_vendor_class *vendor_class;
1311 * Was a MAD registration request supplied
1312 * with the original registration?
1314 if (!agent_priv->reg_req) {
1318 port_priv = agent_priv->qp_info->port_priv;
1319 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1320 class = port_priv->version[
1321 agent_priv->reg_req->mgmt_class_version].class;
1325 method = class->method_table[mgmt_class];
1327 /* Remove any methods for this mad agent */
1328 remove_methods_mad_agent(method, agent_priv);
1329 /* Now, check to see if there are any methods still in use */
1330 if (!check_method_table(method)) {
1331 /* If not, release management method table */
1333 class->method_table[mgmt_class] = NULL;
1334 /* Any management classes left ? */
1335 if (!check_class_table(class)) {
1336 /* If not, release management class table */
1339 agent_priv->reg_req->
1340 mgmt_class_version].class = NULL;
1346 if (!is_vendor_class(mgmt_class))
1349 /* normalize mgmt_class to vendor range 2 */
1350 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1351 vendor = port_priv->version[
1352 agent_priv->reg_req->mgmt_class_version].vendor;
1357 vendor_class = vendor->vendor_class[mgmt_class];
1359 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1362 method = vendor_class->method_table[index];
1364 /* Remove any methods for this mad agent */
1365 remove_methods_mad_agent(method, agent_priv);
1367 * Now, check to see if there are
1368 * any methods still in use
1370 if (!check_method_table(method)) {
1371 /* If not, release management method table */
1373 vendor_class->method_table[index] = NULL;
1374 memset(vendor_class->oui[index], 0, 3);
1375 /* Any OUIs left ? */
1376 if (!check_vendor_class(vendor_class)) {
1377 /* If not, release vendor class table */
1378 kfree(vendor_class);
1379 vendor->vendor_class[mgmt_class] = NULL;
1380 /* Any other vendor classes left ? */
1381 if (!check_vendor_table(vendor)) {
1384 agent_priv->reg_req->
1385 mgmt_class_version].
1397 static struct ib_mad_agent_private *
1398 find_mad_agent(struct ib_mad_port_private *port_priv,
1401 struct ib_mad_agent_private *mad_agent = NULL;
1402 unsigned long flags;
1404 spin_lock_irqsave(&port_priv->reg_lock, flags);
1405 if (response_mad(mad)) {
1407 struct ib_mad_agent_private *entry;
1410 * Routing is based on the high 32 bits of the transaction ID
1413 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1414 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1415 if (entry->agent.hi_tid == hi_tid) {
1421 struct ib_mad_mgmt_class_table *class;
1422 struct ib_mad_mgmt_method_table *method;
1423 struct ib_mad_mgmt_vendor_class_table *vendor;
1424 struct ib_mad_mgmt_vendor_class *vendor_class;
1425 struct ib_vendor_mad *vendor_mad;
1429 * Routing is based on version, class, and method
1430 * For "newer" vendor MADs, also based on OUI
1432 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1434 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1435 class = port_priv->version[
1436 mad->mad_hdr.class_version].class;
1439 method = class->method_table[convert_mgmt_class(
1440 mad->mad_hdr.mgmt_class)];
1442 mad_agent = method->agent[mad->mad_hdr.method &
1443 ~IB_MGMT_METHOD_RESP];
1445 vendor = port_priv->version[
1446 mad->mad_hdr.class_version].vendor;
1449 vendor_class = vendor->vendor_class[vendor_class_index(
1450 mad->mad_hdr.mgmt_class)];
1453 /* Find matching OUI */
1454 vendor_mad = (struct ib_vendor_mad *)mad;
1455 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1458 method = vendor_class->method_table[index];
1460 mad_agent = method->agent[mad->mad_hdr.method &
1461 ~IB_MGMT_METHOD_RESP];
1467 if (mad_agent->agent.recv_handler)
1468 atomic_inc(&mad_agent->refcount);
1470 printk(KERN_NOTICE PFX "No receive handler for client "
1472 &mad_agent->agent, port_priv->port_num);
1477 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1482 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1486 /* Make sure MAD base version is understood */
1487 if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1488 printk(KERN_ERR PFX "MAD received with unsupported base "
1489 "version %d\n", mad->mad_hdr.base_version);
1493 /* Filter SMI packets sent to other than QP0 */
1494 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1495 (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1499 /* Filter GSI packets sent to QP0 */
1508 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1509 struct ib_mad_hdr *mad_hdr)
1511 struct ib_rmpp_mad *rmpp_mad;
1513 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1514 return !mad_agent_priv->agent.rmpp_version ||
1515 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1516 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1517 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1520 struct ib_mad_send_wr_private*
1521 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
1523 struct ib_mad_send_wr_private *mad_send_wr;
1525 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1527 if (mad_send_wr->tid == tid)
1532 * It's possible to receive the response before we've
1533 * been notified that the send has completed
1535 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1537 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
1538 mad_send_wr->tid == tid && mad_send_wr->timeout) {
1539 /* Verify request has not been canceled */
1540 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1547 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1549 mad_send_wr->timeout = 0;
1550 if (mad_send_wr->refcount == 1) {
1551 list_del(&mad_send_wr->agent_list);
1552 list_add_tail(&mad_send_wr->agent_list,
1553 &mad_send_wr->mad_agent_priv->done_list);
1557 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1558 struct ib_mad_recv_wc *mad_recv_wc)
1560 struct ib_mad_send_wr_private *mad_send_wr;
1561 struct ib_mad_send_wc mad_send_wc;
1562 unsigned long flags;
1565 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1566 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1567 if (mad_agent_priv->agent.rmpp_version) {
1568 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1571 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1572 wake_up(&mad_agent_priv->wait);
1577 /* Complete corresponding request */
1578 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1579 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1580 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1581 mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
1583 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1584 ib_free_recv_mad(mad_recv_wc);
1585 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1586 wake_up(&mad_agent_priv->wait);
1589 ib_mark_mad_done(mad_send_wr);
1590 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1592 /* Defined behavior is to complete response before request */
1593 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1594 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1596 atomic_dec(&mad_agent_priv->refcount);
1598 mad_send_wc.status = IB_WC_SUCCESS;
1599 mad_send_wc.vendor_err = 0;
1600 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1601 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1603 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1605 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1606 wake_up(&mad_agent_priv->wait);
1610 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1613 struct ib_mad_qp_info *qp_info;
1614 struct ib_mad_private_header *mad_priv_hdr;
1615 struct ib_mad_private *recv, *response;
1616 struct ib_mad_list_head *mad_list;
1617 struct ib_mad_agent_private *mad_agent;
1619 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1621 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1622 "for response buffer\n");
1624 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1625 qp_info = mad_list->mad_queue->qp_info;
1626 dequeue_mad(mad_list);
1628 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1630 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1631 dma_unmap_single(port_priv->device->dma_device,
1632 pci_unmap_addr(&recv->header, mapping),
1633 sizeof(struct ib_mad_private) -
1634 sizeof(struct ib_mad_private_header),
1637 /* Setup MAD receive work completion from "normal" work completion */
1638 recv->header.wc = *wc;
1639 recv->header.recv_wc.wc = &recv->header.wc;
1640 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1641 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1642 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1644 if (atomic_read(&qp_info->snoop_count))
1645 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1648 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1651 if (recv->mad.mad.mad_hdr.mgmt_class ==
1652 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1653 if (!smi_handle_dr_smp_recv(&recv->mad.smp,
1654 port_priv->device->node_type,
1655 port_priv->port_num,
1656 port_priv->device->phys_port_cnt))
1658 if (!smi_check_forward_dr_smp(&recv->mad.smp))
1660 if (!smi_handle_dr_smp_send(&recv->mad.smp,
1661 port_priv->device->node_type,
1662 port_priv->port_num))
1664 if (!smi_check_local_dr_smp(&recv->mad.smp,
1666 port_priv->port_num))
1671 /* Give driver "right of first refusal" on incoming MAD */
1672 if (port_priv->device->process_mad) {
1676 printk(KERN_ERR PFX "No memory for response MAD\n");
1678 * Is it better to assume that
1679 * it wouldn't be processed ?
1684 ret = port_priv->device->process_mad(port_priv->device, 0,
1685 port_priv->port_num,
1688 &response->mad.mad);
1689 if (ret & IB_MAD_RESULT_SUCCESS) {
1690 if (ret & IB_MAD_RESULT_CONSUMED)
1692 if (ret & IB_MAD_RESULT_REPLY) {
1693 agent_send_response(&response->mad.mad,
1696 port_priv->port_num,
1697 qp_info->qp->qp_num);
1703 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1705 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1707 * recv is freed on the error paths in ib_mad_complete_recv()
1708 * or by the client's recv_handler, which that function invokes
1714 /* Post another receive request for this QP */
1716 ib_mad_post_receive_mads(qp_info, response);
1718 kmem_cache_free(ib_mad_cache, recv);
1720 ib_mad_post_receive_mads(qp_info, recv);
1723 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1725 struct ib_mad_send_wr_private *mad_send_wr;
1726 unsigned long delay;
1728 if (list_empty(&mad_agent_priv->wait_list)) {
1729 cancel_delayed_work(&mad_agent_priv->timed_work);
1731 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1732 struct ib_mad_send_wr_private,
1735 if (time_after(mad_agent_priv->timeout,
1736 mad_send_wr->timeout)) {
1737 mad_agent_priv->timeout = mad_send_wr->timeout;
1738 cancel_delayed_work(&mad_agent_priv->timed_work);
1739 delay = mad_send_wr->timeout - jiffies;
1740 if ((long)delay <= 0)
1742 queue_delayed_work(mad_agent_priv->qp_info->
1744 &mad_agent_priv->timed_work, delay);
1749 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1751 struct ib_mad_agent_private *mad_agent_priv;
1752 struct ib_mad_send_wr_private *temp_mad_send_wr;
1753 struct list_head *list_item;
1754 unsigned long delay;
1756 mad_agent_priv = mad_send_wr->mad_agent_priv;
1757 list_del(&mad_send_wr->agent_list);
1759 delay = mad_send_wr->timeout;
1760 mad_send_wr->timeout += jiffies;
1763 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
1764 temp_mad_send_wr = list_entry(list_item,
1765 struct ib_mad_send_wr_private,
1767 if (time_after(mad_send_wr->timeout,
1768 temp_mad_send_wr->timeout))
1773 list_item = &mad_agent_priv->wait_list;
1774 list_add(&mad_send_wr->agent_list, list_item);
1776 /* Reschedule a work item if we have a shorter timeout */
1777 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
1778 cancel_delayed_work(&mad_agent_priv->timed_work);
1779 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
1780 &mad_agent_priv->timed_work, delay);
1784 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
1787 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
1788 wait_for_response(mad_send_wr);
1792 * Process a send work completion
1794 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
1795 struct ib_mad_send_wc *mad_send_wc)
1797 struct ib_mad_agent_private *mad_agent_priv;
1798 unsigned long flags;
1801 mad_agent_priv = mad_send_wr->mad_agent_priv;
1802 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1803 if (mad_agent_priv->agent.rmpp_version) {
1804 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
1805 if (ret == IB_RMPP_RESULT_CONSUMED)
1808 ret = IB_RMPP_RESULT_UNHANDLED;
1810 if (mad_send_wc->status != IB_WC_SUCCESS &&
1811 mad_send_wr->status == IB_WC_SUCCESS) {
1812 mad_send_wr->status = mad_send_wc->status;
1813 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
1816 if (--mad_send_wr->refcount > 0) {
1817 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
1818 mad_send_wr->status == IB_WC_SUCCESS) {
1819 wait_for_response(mad_send_wr);
1824 /* Remove send from MAD agent and notify client of completion */
1825 list_del(&mad_send_wr->agent_list);
1826 adjust_timeout(mad_agent_priv);
1827 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1829 if (mad_send_wr->status != IB_WC_SUCCESS )
1830 mad_send_wc->status = mad_send_wr->status;
1831 if (ret == IB_RMPP_RESULT_INTERNAL)
1832 ib_rmpp_send_handler(mad_send_wc);
1834 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
1837 /* Release reference on agent taken when sending */
1838 if (atomic_dec_and_test(&mad_agent_priv->refcount))
1839 wake_up(&mad_agent_priv->wait);
1842 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1845 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
1848 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
1849 struct ib_mad_list_head *mad_list;
1850 struct ib_mad_qp_info *qp_info;
1851 struct ib_mad_queue *send_queue;
1852 struct ib_send_wr *bad_send_wr;
1853 struct ib_mad_send_wc mad_send_wc;
1854 unsigned long flags;
1857 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1858 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1860 send_queue = mad_list->mad_queue;
1861 qp_info = send_queue->qp_info;
1864 dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
1865 pci_unmap_addr(mad_send_wr, mapping),
1866 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
1867 queued_send_wr = NULL;
1868 spin_lock_irqsave(&send_queue->lock, flags);
1869 list_del(&mad_list->list);
1871 /* Move queued send to the send queue */
1872 if (send_queue->count-- > send_queue->max_active) {
1873 mad_list = container_of(qp_info->overflow_list.next,
1874 struct ib_mad_list_head, list);
1875 queued_send_wr = container_of(mad_list,
1876 struct ib_mad_send_wr_private,
1878 list_del(&mad_list->list);
1879 list_add_tail(&mad_list->list, &send_queue->list);
1881 spin_unlock_irqrestore(&send_queue->lock, flags);
1883 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1884 mad_send_wc.status = wc->status;
1885 mad_send_wc.vendor_err = wc->vendor_err;
1886 if (atomic_read(&qp_info->snoop_count))
1887 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1888 IB_MAD_SNOOP_SEND_COMPLETIONS);
1889 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1891 if (queued_send_wr) {
1892 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
1895 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
1896 mad_send_wr = queued_send_wr;
1897 wc->status = IB_WC_LOC_QP_OP_ERR;
1903 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
1905 struct ib_mad_send_wr_private *mad_send_wr;
1906 struct ib_mad_list_head *mad_list;
1907 unsigned long flags;
1909 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1910 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
1911 mad_send_wr = container_of(mad_list,
1912 struct ib_mad_send_wr_private,
1914 mad_send_wr->retry = 1;
1916 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1919 static void mad_error_handler(struct ib_mad_port_private *port_priv,
1922 struct ib_mad_list_head *mad_list;
1923 struct ib_mad_qp_info *qp_info;
1924 struct ib_mad_send_wr_private *mad_send_wr;
1927 /* Determine if failure was a send or receive */
1928 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1929 qp_info = mad_list->mad_queue->qp_info;
1930 if (mad_list->mad_queue == &qp_info->recv_queue)
1932 * Receive errors indicate that the QP has entered the error
1933 * state - error handling/shutdown code will clean up
1938 * Send errors will transition the QP to SQE - move
1939 * QP to RTS and repost flushed work requests
1941 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
1943 if (wc->status == IB_WC_WR_FLUSH_ERR) {
1944 if (mad_send_wr->retry) {
1946 struct ib_send_wr *bad_send_wr;
1948 mad_send_wr->retry = 0;
1949 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
1952 ib_mad_send_done_handler(port_priv, wc);
1954 ib_mad_send_done_handler(port_priv, wc);
1956 struct ib_qp_attr *attr;
1958 /* Transition QP to RTS and fail offending send */
1959 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1961 attr->qp_state = IB_QPS_RTS;
1962 attr->cur_qp_state = IB_QPS_SQE;
1963 ret = ib_modify_qp(qp_info->qp, attr,
1964 IB_QP_STATE | IB_QP_CUR_STATE);
1967 printk(KERN_ERR PFX "mad_error_handler - "
1968 "ib_modify_qp to RTS : %d\n", ret);
1970 mark_sends_for_retry(qp_info);
1972 ib_mad_send_done_handler(port_priv, wc);
1977 * IB MAD completion callback
1979 static void ib_mad_completion_handler(void *data)
1981 struct ib_mad_port_private *port_priv;
1984 port_priv = (struct ib_mad_port_private *)data;
1985 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
1987 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
1988 if (wc.status == IB_WC_SUCCESS) {
1989 switch (wc.opcode) {
1991 ib_mad_send_done_handler(port_priv, &wc);
1994 ib_mad_recv_done_handler(port_priv, &wc);
2001 mad_error_handler(port_priv, &wc);
2005 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2007 unsigned long flags;
2008 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2009 struct ib_mad_send_wc mad_send_wc;
2010 struct list_head cancel_list;
2012 INIT_LIST_HEAD(&cancel_list);
2014 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2015 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2016 &mad_agent_priv->send_list, agent_list) {
2017 if (mad_send_wr->status == IB_WC_SUCCESS) {
2018 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2019 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2023 /* Empty wait list to prevent receives from finding a request */
2024 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2025 /* Empty local completion list as well */
2026 list_splice_init(&mad_agent_priv->local_list, &cancel_list);
2027 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2029 /* Report all cancelled requests */
2030 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2031 mad_send_wc.vendor_err = 0;
2033 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2034 &cancel_list, agent_list) {
2035 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2036 list_del(&mad_send_wr->agent_list);
2037 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2039 atomic_dec(&mad_agent_priv->refcount);
2043 static struct ib_mad_send_wr_private*
2044 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2045 struct ib_mad_send_buf *send_buf)
2047 struct ib_mad_send_wr_private *mad_send_wr;
2049 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2051 if (&mad_send_wr->send_buf == send_buf)
2055 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2057 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2058 &mad_send_wr->send_buf == send_buf)
2064 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2065 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2067 struct ib_mad_agent_private *mad_agent_priv;
2068 struct ib_mad_send_wr_private *mad_send_wr;
2069 unsigned long flags;
2072 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2074 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2075 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2076 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2077 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2081 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2083 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2084 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2087 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2089 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2091 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2093 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2096 EXPORT_SYMBOL(ib_modify_mad);
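/*
 * Illustrative sketch (assumption): a client may lengthen or shorten the
 * response timeout of a MAD it has already posted, or abort it entirely;
 * a timeout of 0 is the cancel case used by ib_cancel_mad() below.
 * 'agent' and 'msg' are hypothetical:
 *
 *	ib_modify_mad(agent, msg, 5000);
 *	ib_modify_mad(agent, msg, 0);
 */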
2098 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2099 struct ib_mad_send_buf *send_buf)
2101 ib_modify_mad(mad_agent, send_buf, 0);
2103 EXPORT_SYMBOL(ib_cancel_mad);
2105 static void local_completions(void *data)
2107 struct ib_mad_agent_private *mad_agent_priv;
2108 struct ib_mad_local_private *local;
2109 struct ib_mad_agent_private *recv_mad_agent;
2110 unsigned long flags;
2113 struct ib_mad_send_wc mad_send_wc;
2115 mad_agent_priv = (struct ib_mad_agent_private *)data;
2117 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2118 while (!list_empty(&mad_agent_priv->local_list)) {
2119 local = list_entry(mad_agent_priv->local_list.next,
2120 struct ib_mad_local_private,
2122 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2123 if (local->mad_priv) {
2124 recv_mad_agent = local->recv_mad_agent;
2125 if (!recv_mad_agent) {
2126 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2127 goto local_send_completion;
2132 * Defined behavior is to complete response
2135 build_smp_wc((unsigned long) local->mad_send_wr,
2136 be16_to_cpu(IB_LID_PERMISSIVE),
2137 0, recv_mad_agent->agent.port_num, &wc);
2139 local->mad_priv->header.recv_wc.wc = &wc;
2140 local->mad_priv->header.recv_wc.mad_len =
2141 sizeof(struct ib_mad);
2142 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2143 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2144 &local->mad_priv->header.recv_wc.rmpp_list);
2145 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2146 local->mad_priv->header.recv_wc.recv_buf.mad =
2147 &local->mad_priv->mad.mad;
2148 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2149 snoop_recv(recv_mad_agent->qp_info,
2150 &local->mad_priv->header.recv_wc,
2151 IB_MAD_SNOOP_RECVS);
2152 recv_mad_agent->agent.recv_handler(
2153 &recv_mad_agent->agent,
2154 &local->mad_priv->header.recv_wc);
2155 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2156 atomic_dec(&recv_mad_agent->refcount);
2157 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2160 local_send_completion:
2162 mad_send_wc.status = IB_WC_SUCCESS;
2163 mad_send_wc.vendor_err = 0;
2164 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2165 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2166 snoop_send(mad_agent_priv->qp_info,
2167 &local->mad_send_wr->send_buf,
2168 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2169 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2172 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2173 list_del(&local->completion_list);
2174 atomic_dec(&mad_agent_priv->refcount);
2176 kmem_cache_free(ib_mad_cache, local->mad_priv);
2179 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
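/*
 * Attempt to resend a timed-out MAD.  Fails once the retry count is
 * exhausted; otherwise the timeout is re-armed and the MAD is reposted
 * (via ib_retry_rmpp() for RMPP-capable agents, ib_send_mad()
 * otherwise) and placed back on the agent's send list.
 */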
2182 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2186 if (!mad_send_wr->retries--)
2187 return -ETIMEDOUT;
2189 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2191 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2192 ret = ib_retry_rmpp(mad_send_wr);
2194 case IB_RMPP_RESULT_UNHANDLED:
2195 ret = ib_send_mad(mad_send_wr);
2197 case IB_RMPP_RESULT_CONSUMED:
2205 ret = ib_send_mad(mad_send_wr);
2208 mad_send_wr->refcount++;
2209 list_add_tail(&mad_send_wr->agent_list,
2210 &mad_send_wr->mad_agent_priv->send_list);
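/*
 * Work queue handler that expires the agent's wait list.  Entries whose
 * timeout lies in the future simply re-arm the delayed work; expired
 * entries are retried via retry_send(), and only if that fails are they
 * completed with IB_WC_RESP_TIMEOUT_ERR (or their recorded error
 * status) through the agent's send handler.
 */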
2215 static void timeout_sends(void *data)
2217 struct ib_mad_agent_private *mad_agent_priv;
2218 struct ib_mad_send_wr_private *mad_send_wr;
2219 struct ib_mad_send_wc mad_send_wc;
2220 unsigned long flags, delay;
2222 mad_agent_priv = (struct ib_mad_agent_private *)data;
2223 mad_send_wc.vendor_err = 0;
2225 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2226 while (!list_empty(&mad_agent_priv->wait_list)) {
2227 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2228 struct ib_mad_send_wr_private,
2231 if (time_after(mad_send_wr->timeout, jiffies)) {
2232 delay = mad_send_wr->timeout - jiffies;
2233 if ((long)delay <= 0)
2234 delay = 1;
2235 queue_delayed_work(mad_agent_priv->qp_info->
2236 port_priv->wq,
2237 &mad_agent_priv->timed_work, delay);
2238 break;
2239 }
2241 list_del(&mad_send_wr->agent_list);
2242 if (mad_send_wr->status == IB_WC_SUCCESS &&
2243 !retry_send(mad_send_wr))
2244 continue;
2246 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2248 if (mad_send_wr->status == IB_WC_SUCCESS)
2249 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2250 else
2251 mad_send_wc.status = mad_send_wr->status;
2252 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2253 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2254 &mad_send_wc);
2256 atomic_dec(&mad_agent_priv->refcount);
2257 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2259 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
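/*
 * CQ completion callback.  It may run in interrupt context, so it only
 * queues the port's work structure; the actual completion processing is
 * done later by ib_mad_completion_handler() on the port work queue.
 */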
2262 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2264 struct ib_mad_port_private *port_priv = cq->cq_context;
2266 queue_work(port_priv->wq, &port_priv->work);
2270 * Allocate receive MADs and post receive WRs for them
2272 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2273 struct ib_mad_private *mad)
2275 unsigned long flags;
2276 int post, ret;
2277 struct ib_mad_private *mad_priv;
2278 struct ib_sge sg_list;
2279 struct ib_recv_wr recv_wr, *bad_recv_wr;
2280 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2282 /* Initialize common scatter list fields */
2283 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2284 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2286 /* Initialize common receive WR fields */
2287 recv_wr.next = NULL;
2288 recv_wr.sg_list = &sg_list;
2289 recv_wr.num_sge = 1;
2292 /* Allocate and map receive buffer */
2297 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2298 if (!mad_priv) {
2299 printk(KERN_ERR PFX "No memory for receive buffer\n");
2304 sg_list.addr = dma_map_single(qp_info->port_priv->
2308 sizeof mad_priv->header,
2310 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2311 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2312 mad_priv->header.mad_list.mad_queue = recv_queue;
2314 /* Post receive WR */
2315 spin_lock_irqsave(&recv_queue->lock, flags);
2316 post = (++recv_queue->count < recv_queue->max_active);
2317 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2318 spin_unlock_irqrestore(&recv_queue->lock, flags);
2319 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2320 if (ret) {
2321 spin_lock_irqsave(&recv_queue->lock, flags);
2322 list_del(&mad_priv->header.mad_list.list);
2323 recv_queue->count--;
2324 spin_unlock_irqrestore(&recv_queue->lock, flags);
2325 dma_unmap_single(qp_info->port_priv->device->dma_device,
2326 pci_unmap_addr(&mad_priv->header,
2329 sizeof mad_priv->header,
2331 kmem_cache_free(ib_mad_cache, mad_priv);
2332 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2341 * Return all the posted receive MADs
2343 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2345 struct ib_mad_private_header *mad_priv_hdr;
2346 struct ib_mad_private *recv;
2347 struct ib_mad_list_head *mad_list;
2349 while (!list_empty(&qp_info->recv_queue.list)) {
2351 mad_list = list_entry(qp_info->recv_queue.list.next,
2352 struct ib_mad_list_head, list);
2353 mad_priv_hdr = container_of(mad_list,
2354 struct ib_mad_private_header,
2355 mad_list);
2356 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2357 header);
2359 /* Remove from posted receive MAD list */
2360 list_del(&mad_list->list);
2362 dma_unmap_single(qp_info->port_priv->device->dma_device,
2363 pci_unmap_addr(&recv->header, mapping),
2364 sizeof(struct ib_mad_private) -
2365 sizeof(struct ib_mad_private_header),
2367 kmem_cache_free(ib_mad_cache, recv);
2370 qp_info->recv_queue.count = 0;
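/*
 * Bring the port's special QPs into service: move QP0 (SMI) and QP1
 * (GSI) through the INIT, RTR and RTS states, request completion
 * notification on the shared CQ, and post the initial receive MADs.
 */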
2376 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2379 struct ib_qp_attr *attr;
2380 struct ib_qp *qp;
2382 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2383 if (!attr) {
2384 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2388 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2389 qp = port_priv->qp_info[i].qp;
2391 * PKey index for QP1 is irrelevant but
2392 * one is needed for the Reset to Init transition
2394 attr->qp_state = IB_QPS_INIT;
2395 attr->pkey_index = 0;
2396 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2397 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2398 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2399 if (ret) {
2400 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2401 "INIT: %d\n", i, ret);
2405 attr->qp_state = IB_QPS_RTR;
2406 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2407 if (ret) {
2408 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2409 "RTR: %d\n", i, ret);
2413 attr->qp_state = IB_QPS_RTS;
2414 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2415 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2416 if (ret) {
2417 printk(KERN_ERR PFX "Couldn't change QP%d state to "
2418 "RTS: %d\n", i, ret);
2423 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2424 if (ret) {
2425 printk(KERN_ERR PFX "Failed to request completion "
2426 "notification: %d\n", ret);
2430 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2431 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2432 if (ret) {
2433 printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2442 static void qp_event_handler(struct ib_event *event, void *qp_context)
2444 struct ib_mad_qp_info *qp_info = qp_context;
2446 /* It's worse than that! He's dead, Jim! */
2447 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2448 event->event, qp_info->qp->qp_num);
2451 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2452 struct ib_mad_queue *mad_queue)
2454 mad_queue->qp_info = qp_info;
2455 mad_queue->count = 0;
2456 spin_lock_init(&mad_queue->lock);
2457 INIT_LIST_HEAD(&mad_queue->list);
2460 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2461 struct ib_mad_qp_info *qp_info)
2463 qp_info->port_priv = port_priv;
2464 init_mad_queue(qp_info, &qp_info->send_queue);
2465 init_mad_queue(qp_info, &qp_info->recv_queue);
2466 INIT_LIST_HEAD(&qp_info->overflow_list);
2467 spin_lock_init(&qp_info->snoop_lock);
2468 qp_info->snoop_table = NULL;
2469 qp_info->snoop_table_size = 0;
2470 atomic_set(&qp_info->snoop_count, 0);
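/*
 * Create one of the two special QPs (IB_QPT_SMI or IB_QPT_GSI) on the
 * port's shared CQ and PD, sized to the fixed MAD send and receive
 * queue depths.
 */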
2473 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2474 enum ib_qp_type qp_type)
2476 struct ib_qp_init_attr qp_init_attr;
2479 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2480 qp_init_attr.send_cq = qp_info->port_priv->cq;
2481 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2482 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2483 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
2484 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
2485 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2486 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2487 qp_init_attr.qp_type = qp_type;
2488 qp_init_attr.port_num = qp_info->port_priv->port_num;
2489 qp_init_attr.qp_context = qp_info;
2490 qp_init_attr.event_handler = qp_event_handler;
2491 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2492 if (IS_ERR(qp_info->qp)) {
2493 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2494 get_spl_qp_index(qp_type));
2495 ret = PTR_ERR(qp_info->qp);
2498 /* Use minimum queue sizes unless the CQ is resized */
2499 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
2500 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
2507 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2509 ib_destroy_qp(qp_info->qp);
2510 if (qp_info->snoop_table)
2511 kfree(qp_info->snoop_table);
2516 * Create the QP, PD, MR, and CQ if needed
2518 static int ib_mad_port_open(struct ib_device *device,
2522 struct ib_mad_port_private *port_priv;
2523 unsigned long flags;
2524 char name[sizeof "ib_mad123"];
2526 /* Create new device info */
2527 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
2528 if (!port_priv) {
2529 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2532 memset(port_priv, 0, sizeof *port_priv);
2533 port_priv->device = device;
2534 port_priv->port_num = port_num;
2535 spin_lock_init(&port_priv->reg_lock);
2536 INIT_LIST_HEAD(&port_priv->agent_list);
2537 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2538 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2540 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
2541 port_priv->cq = ib_create_cq(port_priv->device,
2542 ib_mad_thread_completion_handler,
2543 NULL, port_priv, cq_size);
2544 if (IS_ERR(port_priv->cq)) {
2545 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2546 ret = PTR_ERR(port_priv->cq);
2550 port_priv->pd = ib_alloc_pd(device);
2551 if (IS_ERR(port_priv->pd)) {
2552 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2553 ret = PTR_ERR(port_priv->pd);
2557 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2558 if (IS_ERR(port_priv->mr)) {
2559 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2560 ret = PTR_ERR(port_priv->mr);
2564 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2567 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2571 snprintf(name, sizeof name, "ib_mad%d", port_num);
2572 port_priv->wq = create_singlethread_workqueue(name);
2573 if (!port_priv->wq) {
2577 INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
2579 ret = ib_mad_port_start(port_priv);
2580 if (ret) {
2581 printk(KERN_ERR PFX "Couldn't start port\n");
2585 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2586 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2587 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2591 destroy_workqueue(port_priv->wq);
2593 destroy_mad_qp(&port_priv->qp_info[1]);
2595 destroy_mad_qp(&port_priv->qp_info[0]);
2597 ib_dereg_mr(port_priv->mr);
2599 ib_dealloc_pd(port_priv->pd);
2601 ib_destroy_cq(port_priv->cq);
2602 cleanup_recv_queue(&port_priv->qp_info[1]);
2603 cleanup_recv_queue(&port_priv->qp_info[0]);
2612 * If there are no classes using the port, free the port
2613 * resources (CQ, MR, PD, QP) and remove the port's info structure
2615 static int ib_mad_port_close(struct ib_device *device, int port_num)
2617 struct ib_mad_port_private *port_priv;
2618 unsigned long flags;
2620 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2621 port_priv = __ib_get_mad_port(device, port_num);
2622 if (port_priv == NULL) {
2623 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2624 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2627 list_del(&port_priv->port_list);
2628 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2630 /* Stop processing completions. */
2631 flush_workqueue(port_priv->wq);
2632 destroy_workqueue(port_priv->wq);
2633 destroy_mad_qp(&port_priv->qp_info[1]);
2634 destroy_mad_qp(&port_priv->qp_info[0]);
2635 ib_dereg_mr(port_priv->mr);
2636 ib_dealloc_pd(port_priv->pd);
2637 ib_destroy_cq(port_priv->cq);
2638 cleanup_recv_queue(&port_priv->qp_info[1]);
2639 cleanup_recv_queue(&port_priv->qp_info[0]);
2640 /* XXX: Handle deallocation of MAD registration tables */
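/*
 * ib_client "add" callback: open the MAD port(s) of a newly registered
 * device (only the single switch management port for a switch, ports 1
 * through phys_port_cnt otherwise) along with the corresponding agent
 * ports, tearing everything back down if any open fails.
 */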
2647 static void ib_mad_init_device(struct ib_device *device)
2651 if (device->node_type == IB_NODE_SWITCH) {
2656 end = device->phys_port_cnt;
2659 for (i = start; i <= end; i++) {
2660 if (ib_mad_port_open(device, i)) {
2661 printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2665 if (ib_agent_port_open(device, i)) {
2666 printk(KERN_ERR PFX "Couldn't open %s port %d "
2675 if (ib_mad_port_close(device, i))
2676 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2682 while (i >= start) {
2683 if (ib_agent_port_close(device, i))
2684 printk(KERN_ERR PFX "Couldn't close %s port %d "
2687 if (ib_mad_port_close(device, i))
2688 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
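/*
 * ib_client "remove" callback: close the agent and MAD ports that
 * ib_mad_init_device() opened for this device.
 */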
2694 static void ib_mad_remove_device(struct ib_device *device)
2696 int i, num_ports, cur_port;
2698 if (device->node_type == IB_NODE_SWITCH) {
2702 num_ports = device->phys_port_cnt;
2705 for (i = 0; i < num_ports; i++, cur_port++) {
2706 if (ib_agent_port_close(device, cur_port))
2707 printk(KERN_ERR PFX "Couldn't close %s port %d "
2709 device->name, cur_port);
2710 if (ib_mad_port_close(device, cur_port))
2711 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2712 device->name, cur_port);
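/*
 * Registering this client makes the core invoke ib_mad_init_device()
 * and ib_mad_remove_device() for every IB device added or removed
 * while the module is loaded.
 */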
2716 static struct ib_client mad_client = {
2718 .add = ib_mad_init_device,
2719 .remove = ib_mad_remove_device
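/*
 * Module init: create the slab cache used for MAD receive buffers and
 * register the MAD layer as an IB client; the cache is destroyed again
 * if client registration fails.
 */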
2722 static int __init ib_mad_init_module(void)
2726 spin_lock_init(&ib_mad_port_list_lock);
2728 ib_mad_cache = kmem_cache_create("ib_mad",
2729 sizeof(struct ib_mad_private),
2734 if (!ib_mad_cache) {
2735 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
2740 INIT_LIST_HEAD(&ib_mad_port_list);
2742 if (ib_register_client(&mad_client)) {
2743 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
2751 kmem_cache_destroy(ib_mad_cache);
2756 static void __exit ib_mad_cleanup_module(void)
2758 ib_unregister_client(&mad_client);
2760 if (kmem_cache_destroy(ib_mad_cache)) {
2761 printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
2765 module_init(ib_mad_init_module);
2766 module_exit(ib_mad_cleanup_module);