/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type)
        {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}

static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}

static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}

static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}

int ib_response_mad(struct ib_mad *mad)
{
        return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
                (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
                 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

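/*
 * Usage sketch: a receive path might use ib_response_mad() to decide
 * whether an incoming MAD answers an earlier request or is a new,
 * unsolicited request.  The helper name below is a hypothetical,
 * illustrative assumption.
 */
#if 0
static void example_classify_mad(struct ib_mad *mad)
{
        if (ib_response_mad(mad))
                printk(KERN_INFO PFX "response MAD, method 0x%x\n",
                       mad->mad_hdr.method);
        else
                printk(KERN_INFO PFX "request MAD, method 0x%x\n",
                       mad->mad_hdr.method);
}
#endif
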
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           struct ib_mad_reg_req *mad_reg_req,
                                           u8 rmpp_version,
                                           ib_mad_send_handler send_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_reg_req *reg_req = NULL;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        struct ib_mad_mgmt_method_table *method;
        int ret2, qpn;
        unsigned long flags;
        u8 mgmt_class, vclass;

        /* Validate parameters */
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1)
                goto error1;

        if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
                goto error1;

        /* Validate MAD registration request if supplied */
        if (mad_reg_req) {
                if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
                        goto error1;
                if (!recv_handler)
                        goto error1;
                if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
                        /*
                         * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
                         * one in this range currently allowed
                         */
                        if (mad_reg_req->mgmt_class !=
                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                                goto error1;
                } else if (mad_reg_req->mgmt_class == 0) {
                        /*
                         * Class 0 is reserved in IBA and is used for
                         * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
                         */
                        goto error1;
                } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
                        /*
                         * If class is in "new" vendor range,
                         * ensure supplied OUI is not zero
                         */
                        if (!is_vendor_oui(mad_reg_req->oui))
                                goto error1;
                }
                /* Make sure class supplied is consistent with RMPP */
                if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
                        if (rmpp_version)
                                goto error1;
                }
                /* Make sure class supplied is consistent with QP type */
                if (qp_type == IB_QPT_SMI) {
                        if ((mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                            (mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                } else {
                        if ((mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                            (mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
                                goto error1;
                }
        } else {
                /* No registration request supplied */
                if (!send_handler)
                        goto error1;
        }

        /* Validate device and port */
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }

        /* Verify the QP requested is supported.  For example, Ethernet devices
         * will not have QP0 */
        if (!port_priv->qp_info[qpn].qp) {
                ret = ERR_PTR(-EPROTONOSUPPORT);
                goto error1;
        }

        /* Allocate structures */
        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                 IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(mad_agent_priv->agent.mr)) {
                ret = ERR_PTR(-ENOMEM);
                goto error2;
        }

        if (mad_reg_req) {
                reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
                if (!reg_req) {
                        ret = ERR_PTR(-ENOMEM);
                        goto error3;
                }
        }

        /* Now, fill in the various structures */
        mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
        mad_agent_priv->reg_req = reg_req;
        mad_agent_priv->agent.rmpp_version = rmpp_version;
        mad_agent_priv->agent.device = device;
        mad_agent_priv->agent.recv_handler = recv_handler;
        mad_agent_priv->agent.send_handler = send_handler;
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;
        spin_lock_init(&mad_agent_priv->lock);
        INIT_LIST_HEAD(&mad_agent_priv->send_list);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

        /*
         * Make sure MAD registration (if supplied)
         * does not overlap with any existing ones
         */
        if (mad_reg_req) {
                mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
                if (!is_vendor_class(mgmt_class)) {
                        class = port_priv->version[mad_reg_req->
                                                   mgmt_class_version].class;
                        if (class) {
                                method = class->method_table[mgmt_class];
                                if (method) {
                                        if (method_in_use(&method,
                                                           mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
                                                  mgmt_class);
                } else {
                        /* "New" vendor class range */
                        vendor = port_priv->version[mad_reg_req->
                                                    mgmt_class_version].vendor;
                        if (vendor) {
                                vclass = vendor_class_index(mgmt_class);
                                vendor_class = vendor->vendor_class[vclass];
                                if (vendor_class) {
                                        if (is_vendor_method_in_use(
                                                        vendor_class,
                                                        mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
                }
                if (ret2) {
                        ret = ERR_PTR(ret2);
                        goto error4;
                }
        }

        /* Add mad agent into port's agent list */
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        return &mad_agent_priv->agent;

error4:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        kfree(reg_req);
error3:
        ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
        kfree(mad_agent_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

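/*
 * Usage sketch for ib_register_mad_agent(): a hypothetical client that
 * wants to receive Get() MADs of the performance management class on
 * the GSI QP.  Handler bodies and error handling are elided; names
 * prefixed example_ are illustrative assumptions, not part of the API.
 */
#if 0
static struct ib_mad_agent *example_register(struct ib_device *device,
                                             u8 port_num,
                                             ib_mad_send_handler send_handler,
                                             ib_mad_recv_handler recv_handler)
{
        struct ib_mad_reg_req reg_req;

        memset(&reg_req, 0, sizeof reg_req);
        reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
        reg_req.mgmt_class_version = 1;
        /* Receive only the Get method; method_mask is a bitmap */
        set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

        return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
                                     &reg_req, 0 /* no RMPP */,
                                     send_handler, recv_handler, NULL);
}
#endif
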
static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
                                struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_snoop_private **new_snoop_table;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        /* Check for empty slot in array. */
        for (i = 0; i < qp_info->snoop_table_size; i++)
                if (!qp_info->snoop_table[i])
                        break;

        if (i == qp_info->snoop_table_size) {
                /* Grow table. */
                new_snoop_table = krealloc(qp_info->snoop_table,
                                           sizeof mad_snoop_priv *
                                           (qp_info->snoop_table_size + 1),
                                           GFP_ATOMIC);
                if (!new_snoop_table) {
                        i = -ENOMEM;
                        goto out;
                }

                qp_info->snoop_table = new_snoop_table;
                qp_info->snoop_table_size++;
        }
        qp_info->snoop_table[i] = mad_snoop_priv;
        atomic_inc(&qp_info->snoop_count);
out:
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
        return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           int mad_snoop_flags,
                                           ib_mad_snoop_handler snoop_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret;
        struct ib_mad_snoop_private *mad_snoop_priv;
        int qpn;

        /* Validate parameters */
        if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
            (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }
        /* Allocate structures */
        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
        if (!mad_snoop_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        /* Now, fill in the various structures */
        mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
        mad_snoop_priv->agent.device = device;
        mad_snoop_priv->agent.recv_handler = recv_handler;
        mad_snoop_priv->agent.snoop_handler = snoop_handler;
        mad_snoop_priv->agent.context = context;
        mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_snoop_priv->agent.port_num = port_num;
        mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
        init_completion(&mad_snoop_priv->comp);
        mad_snoop_priv->snoop_index = register_snoop_agent(
                                                &port_priv->qp_info[qpn],
                                                mad_snoop_priv);
        if (mad_snoop_priv->snoop_index < 0) {
                ret = ERR_PTR(mad_snoop_priv->snoop_index);
                goto error2;
        }

        atomic_set(&mad_snoop_priv->refcount, 1);
        return &mad_snoop_priv->agent;

error2:
        kfree(mad_snoop_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

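/*
 * Usage sketch for ib_register_mad_snoop(): a hypothetical monitor
 * that observes send completions and received MADs on QP1.  The
 * handler parameters are assumed to be supplied by the caller.
 */
#if 0
static struct ib_mad_agent *example_snoop(struct ib_device *device,
                                          u8 port_num,
                                          ib_mad_snoop_handler snoop_handler,
                                          ib_mad_recv_handler recv_handler)
{
        return ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
                                     IB_MAD_SNOOP_SEND_COMPLETIONS |
                                     IB_MAD_SNOOP_RECVS,
                                     snoop_handler, recv_handler, NULL);
}
#endif
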
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        /* Note that we could still be handling received MADs */

        /*
         * Canceling all sends results in dropping received response
         * MADs, preventing us from queuing additional work
         */
        cancel_mads(mad_agent_priv);
        port_priv = mad_agent_priv->qp_info->port_priv;
        cancel_delayed_work(&mad_agent_priv->timed_work);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        remove_mad_reg_req(mad_agent_priv);
        list_del(&mad_agent_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        flush_workqueue(port_priv->wq);
        ib_cancel_rmpp_recvs(mad_agent_priv);

        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);

        kfree(mad_agent_priv->reg_req);
        ib_dereg_mr(mad_agent_priv->agent.mr);
        kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_qp_info *qp_info;
        unsigned long flags;

        qp_info = mad_snoop_priv->qp_info;
        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
        atomic_dec(&qp_info->snoop_count);
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

        deref_snoop_agent(mad_snoop_priv);
        wait_for_completion(&mad_snoop_priv->comp);

        kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_send_buf *send_buf,
                       struct ib_mad_send_wc *mad_send_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                    send_buf, mad_send_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_recv_wc *mad_recv_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
                                                   mad_recv_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
                         u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                         struct ib_wc *wc)
{
        memset(wc, 0, sizeof *wc);
        wc->wr_id = wr_id;
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
        wc->qp = qp;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
        wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                  struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret = 0;
        struct ib_smp *smp = mad_send_wr->send_buf.mad;
        unsigned long flags;
        struct ib_mad_local_private *local;
        struct ib_mad_private *mad_priv;
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent_private *recv_mad_agent = NULL;
        struct ib_device *device = mad_agent_priv->agent.device;
        u8 port_num;
        struct ib_wc mad_wc;
        struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

        if (device->node_type == RDMA_NODE_IB_SWITCH &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->wr.ud.port_num;
        else
                port_num = mad_agent_priv->agent.port_num;

        /*
         * Directed route handling starts if the initial LID routed part of
         * a request or the ending LID routed part of a response is empty.
         * If we are at the start of the LID routed part, don't update the
         * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
         */
        if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
             IB_LID_PERMISSIVE &&
             smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
             IB_SMI_DISCARD) {
                ret = -EINVAL;
                printk(KERN_ERR PFX "Invalid directed route\n");
                goto out;
        }

        /* Check to post send on QP or process locally */
        if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
            smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
                goto out;

        local = kmalloc(sizeof *local, GFP_ATOMIC);
        if (!local) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
                goto out;
        }
        local->mad_priv = NULL;
        local->recv_mad_agent = NULL;
        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
        if (!mad_priv) {
                ret = -ENOMEM;
                printk(KERN_ERR PFX "No memory for local response MAD\n");
                kfree(local);
                goto out;
        }

        build_smp_wc(mad_agent_priv->agent.qp,
                     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
                     send_wr->wr.ud.pkey_index,
                     send_wr->wr.ud.port_num, &mad_wc);

        /* No GRH for DR SMP */
        ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                  (struct ib_mad *)smp,
                                  (struct ib_mad *)&mad_priv->mad);
        switch (ret)
        {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                if (ib_response_mad(&mad_priv->mad.mad) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
                        /*
                         * Reference MAD agent until receive
                         * side of local completion handled
                         */
                        atomic_inc(&mad_agent_priv->refcount);
                } else
                        kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                            mad_agent_priv->agent.port_num);
                if (port_priv) {
                        memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
                        recv_mad_agent = find_mad_agent(port_priv,
                                                        &mad_priv->mad.mad);
                }
                if (!port_priv || !recv_mad_agent) {
                        /*
                         * No receiving agent so drop packet and
                         * generate send completion.
                         */
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        break;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
                break;
        default:
                kmem_cache_free(ib_mad_cache, mad_priv);
                kfree(local);
                ret = -EINVAL;
                goto out;
        }

        local->mad_send_wr = mad_send_wr;
        /* Reference MAD agent until send side of local completion handled */
        atomic_inc(&mad_agent_priv->refcount);
        /* Queue local completion to local list */
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->local_work);
        ret = 1;
out:
        return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
        int seg_size, pad;

        seg_size = sizeof(struct ib_mad) - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
                return pad == seg_size ? 0 : pad;
        } else
                return seg_size;
}

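/*
 * Worked example for get_pad_size(), using values implied by the
 * definitions above (sizeof(struct ib_mad) == 256): with
 * hdr_len = IB_MGMT_SA_HDR (56), each segment carries
 * 256 - 56 = 200 data bytes.  For data_len = 450, 450 % 200 = 50,
 * so get_pad_size() returns 200 - 50 = 150 pad bytes and the payload
 * spans three RMPP segments.
 */
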
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_segment *s, *t;

        list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
                list_del(&s->list);
                kfree(s);
        }
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
                                gfp_t gfp_mask)
{
        struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
        struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
        struct ib_rmpp_segment *seg = NULL;
        int left, seg_size, pad;

        send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
        seg_size = send_buf->seg_size;
        pad = send_wr->pad;

        /* Allocate data segments. */
        for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
                seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
                if (!seg) {
                        printk(KERN_ERR "alloc_send_rmpp_list: RMPP mem "
                               "alloc failed for len %zd, gfp %#x\n",
                               sizeof (*seg) + seg_size, gfp_mask);
                        free_send_rmpp_list(send_wr);
                        return -ENOMEM;
                }
                seg->num = ++send_buf->seg_count;
                list_add_tail(&seg->list, &send_wr->rmpp_list);
        }

        /* Zero any padding */
        if (pad)
                memset(seg->data + seg_size - pad, 0, pad);

        rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
                                          agent.rmpp_version;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
                                        struct ib_rmpp_segment, list);
        send_wr->last_ack_seg = send_wr->cur_seg;
        return 0;
}

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                            u32 remote_qpn, u16 pkey_index,
                                            int rmpp_active,
                                            int hdr_len, int data_len,
                                            gfp_t gfp_mask)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        int pad, message_size, ret, size;
        void *buf;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
        pad = get_pad_size(hdr_len, data_len);
        message_size = hdr_len + data_len + pad;

        if ((!mad_agent->rmpp_version &&
             (rmpp_active || message_size > sizeof(struct ib_mad))) ||
            (!rmpp_active && message_size > sizeof(struct ib_mad)))
                return ERR_PTR(-EINVAL);

        size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
        buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        mad_send_wr = buf + size;
        INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
        mad_send_wr->send_buf.mad = buf;
        mad_send_wr->send_buf.hdr_len = hdr_len;
        mad_send_wr->send_buf.data_len = data_len;
        mad_send_wr->pad = pad;

        mad_send_wr->mad_agent_priv = mad_agent_priv;
        mad_send_wr->sg_list[0].length = hdr_len;
        mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
        mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
        mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

        mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
        mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
        mad_send_wr->send_wr.num_sge = 2;
        mad_send_wr->send_wr.opcode = IB_WR_SEND;
        mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
        mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
        mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
        mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

        if (rmpp_active) {
                ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
                if (ret) {
                        kfree(buf);
                        return ERR_PTR(ret);
                }
        }

        mad_send_wr->send_buf.mad_agent = mad_agent;
        atomic_inc(&mad_agent_priv->refcount);
        return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

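/*
 * Usage sketch for ib_create_send_mad() and ib_post_send_mad(): a
 * hypothetical sender building a single-packet (non-RMPP) MAD with a
 * bare MAD header.  Address handle creation and header setup are
 * elided; the example_ name and the timeout/retry values are
 * illustrative assumptions.
 */
#if 0
static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah,
                        u32 remote_qpn, u16 pkey_index)
{
        struct ib_mad_send_buf *msg;

        msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
                                 0 /* no RMPP */, IB_MGMT_MAD_HDR,
                                 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
                                 GFP_KERNEL);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        /* MAD header (mgmt_class, method, TID, ...) would be filled
         * in via msg->mad here. */
        msg->ah = ah;
        msg->timeout_ms = 1000; /* wait up to 1s for a response */
        msg->retries = 2;

        return ib_post_send_mad(msg, NULL);
}
#endif
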
int ib_get_mad_data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return IB_MGMT_SA_HDR;
        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
                 (mgmt_class == IB_MGMT_CLASS_BIS))
                return IB_MGMT_DEVICE_HDR;
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return IB_MGMT_VENDOR_HDR;
        else
                return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_BIS) ||
            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
                return 1;
        return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct list_head *list;

        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);
        list = &mad_send_wr->cur_seg->list;

        if (mad_send_wr->cur_seg->num < seg_num) {
                list_for_each_entry(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        } else if (mad_send_wr->cur_seg->num > seg_num) {
                list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
                        if (mad_send_wr->cur_seg->num == seg_num)
                                break;
        }
        return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
        if (mad_send_wr->send_buf.seg_count)
                return ib_get_rmpp_segment(&mad_send_wr->send_buf,
                                           mad_send_wr->seg_num);
        else
                return mad_send_wr->send_buf.mad +
                       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;

        mad_agent_priv = container_of(send_buf->mad_agent,
                                      struct ib_mad_agent_private, agent);
        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);

        free_send_rmpp_list(mad_send_wr);
        kfree(send_buf->mad);
        deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_qp_info *qp_info;
        struct list_head *list;
        struct ib_send_wr *bad_send_wr;
        struct ib_mad_agent *mad_agent;
        struct ib_sge *sge;
        unsigned long flags;
        int ret;

        /* Set WR ID to find mad_send_wr upon completion */
        qp_info = mad_send_wr->mad_agent_priv->qp_info;
        mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
        mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
        sge[0].addr = ib_dma_map_single(mad_agent->device,
                                        mad_send_wr->send_buf.mad,
                                        sge[0].length,
                                        DMA_TO_DEVICE);
        mad_send_wr->header_mapping = sge[0].addr;

        sge[1].addr = ib_dma_map_single(mad_agent->device,
                                        ib_get_payload(mad_send_wr),
                                        sge[1].length,
                                        DMA_TO_DEVICE);
        mad_send_wr->payload_mapping = sge[1].addr;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
                ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
                                   &bad_send_wr);
                list = &qp_info->send_queue.list;
        } else {
                ret = 0;
                list = &qp_info->overflow_list;
        }

        if (!ret) {
                qp_info->send_queue.count++;
                list_add_tail(&mad_send_wr->mad_list.list, list);
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->header_mapping,
                                    sge[0].length, DMA_TO_DEVICE);
                ib_dma_unmap_single(mad_agent->device,
                                    mad_send_wr->payload_mapping,
                                    sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                     struct ib_mad_send_buf **bad_send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf *next_send_buf;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;
        int ret = -EINVAL;

        /* Walk list of send WRs and post each on send list */
        for (; send_buf; send_buf = next_send_buf) {

                mad_send_wr = container_of(send_buf,
                                           struct ib_mad_send_wr_private,
                                           send_buf);
                mad_agent_priv = mad_send_wr->mad_agent_priv;

                if (!send_buf->mad_agent->send_handler ||
                    (send_buf->timeout_ms &&
                     !send_buf->mad_agent->recv_handler)) {
                        ret = -EINVAL;
                        goto error;
                }

                if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
                        if (mad_agent_priv->agent.rmpp_version) {
                                ret = -EINVAL;
                                goto error;
                        }
                }

                /*
                 * Save pointer to next work request to post in case the
                 * current one completes, and the user modifies the work
                 * request associated with the completion
                 */
                next_send_buf = send_buf->next;
                mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

                if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        ret = handle_outgoing_dr_smp(mad_agent_priv,
                                                     mad_send_wr);
                        if (ret < 0)            /* error */
                                goto error;
                        else if (ret == 1)      /* locally consumed */
                                continue;
                }

                mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
                /* Timeout will be updated after send completes */
                mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
                mad_send_wr->max_retries = send_buf->retries;
                mad_send_wr->retries_left = send_buf->retries;
                send_buf->retries = 0;
                /* Reference for work request to QP + response */
                mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
                mad_send_wr->status = IB_WC_SUCCESS;

                /* Reference MAD agent until send completes */
                atomic_inc(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_agent_priv->send_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (mad_agent_priv->agent.rmpp_version) {
                        ret = ib_send_rmpp_mad(mad_send_wr);
                        if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
                                ret = ib_send_mad(mad_send_wr);
                } else
                        ret = ib_send_mad(mad_send_wr);
                if (ret < 0) {
                        /* Fail send request */
                        spin_lock_irqsave(&mad_agent_priv->lock, flags);
                        list_del(&mad_send_wr->agent_list);
                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                        atomic_dec(&mad_agent_priv->refcount);
                        goto error;
                }
        }
        return 0;
error:
        if (bad_send_buf)
                *bad_send_buf = send_buf;
        return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

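/*
 * Usage sketch: a hypothetical send_handler matching
 * ib_mad_send_handler.  Once the send completes (or times out), the
 * buffer created by ib_create_send_mad() is returned with
 * ib_free_send_mad().  The example_ name is an illustrative
 * assumption.
 */
#if 0
static void example_send_handler(struct ib_mad_agent *agent,
                                 struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->status != IB_WC_SUCCESS)
                printk(KERN_ERR PFX "send failed: %d\n",
                       mad_send_wc->status);
        ib_free_send_mad(mad_send_wc->send_buf);
}
#endif
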
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *priv;
        struct list_head free_list;

        INIT_LIST_HEAD(&free_list);
        list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

        list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
                                        &free_list, list) {
                mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
                                           recv_buf);
                mad_priv_hdr = container_of(mad_recv_wc,
                                            struct ib_mad_private_header,
                                            recv_wc);
                priv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);
                kmem_cache_free(ib_mad_cache, priv);
        }
}
EXPORT_SYMBOL(ib_free_recv_mad);

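/*
 * Usage sketch: a hypothetical recv_handler matching
 * ib_mad_recv_handler.  The receive buffers (including any RMPP
 * segment list) must be handed back with ib_free_recv_mad() when the
 * client is done with them.  The example_ name is an illustrative
 * assumption.
 */
#if 0
static void example_recv_handler(struct ib_mad_agent *agent,
                                 struct ib_mad_recv_wc *mad_recv_wc)
{
        printk(KERN_INFO PFX "received %d bytes, class 0x%x\n",
               mad_recv_wc->mad_len,
               mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        ib_free_recv_mad(mad_recv_wc);
}
#endif
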
1179 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1180                                         u8 rmpp_version,
1181                                         ib_mad_send_handler send_handler,
1182                                         ib_mad_recv_handler recv_handler,
1183                                         void *context)
1184 {
1185         return ERR_PTR(-EINVAL);        /* XXX: for now */
1186 }
1187 EXPORT_SYMBOL(ib_redirect_mad_qp);
1188
1189 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1190                       struct ib_wc *wc)
1191 {
1192         printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1193         return 0;
1194 }
1195 EXPORT_SYMBOL(ib_process_mad_wc);
1196
1197 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1198                          struct ib_mad_reg_req *mad_reg_req)
1199 {
1200         int i;
1201
1202         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1203                 if ((*method)->agent[i]) {
1204                         printk(KERN_ERR PFX "Method %d already in use\n", i);
1205                         return -EINVAL;
1206                 }
1207         }
1208         return 0;
1209 }
1210
1211 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1212 {
1213         /* Allocate management method table */
1214         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1215         if (!*method) {
1216                 printk(KERN_ERR PFX "No memory for "
1217                        "ib_mad_mgmt_method_table\n");
1218                 return -ENOMEM;
1219         }
1220
1221         return 0;
1222 }
1223
1224 /*
1225  * Check to see if there are any methods still in use
1226  */
1227 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1228 {
1229         int i;
1230
1231         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1232                 if (method->agent[i])
1233                         return 1;
1234         return 0;
1235 }
1236
1237 /*
1238  * Check to see if there are any method tables for this class still in use
1239  */
1240 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1241 {
1242         int i;
1243
1244         for (i = 0; i < MAX_MGMT_CLASS; i++)
1245                 if (class->method_table[i])
1246                         return 1;
1247         return 0;
1248 }
1249
1250 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1251 {
1252         int i;
1253
1254         for (i = 0; i < MAX_MGMT_OUI; i++)
1255                 if (vendor_class->method_table[i])
1256                         return 1;
1257         return 0;
1258 }
1259
1260 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1261                            char *oui)
1262 {
1263         int i;
1264
1265         for (i = 0; i < MAX_MGMT_OUI; i++)
1266                 /* Is there matching OUI for this vendor class ? */
1267                 if (!memcmp(vendor_class->oui[i], oui, 3))
1268                         return i;
1269
1270         return -1;
1271 }
1272
1273 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1274 {
1275         int i;
1276
1277         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1278                 if (vendor->vendor_class[i])
1279                         return 1;
1280
1281         return 0;
1282 }
1283
1284 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1285                                      struct ib_mad_agent_private *agent)
1286 {
1287         int i;
1288
1289         /* Remove any methods for this mad agent */
1290         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1291                 if (method->agent[i] == agent) {
1292                         method->agent[i] = NULL;
1293                 }
1294         }
1295 }
1296
1297 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1298                               struct ib_mad_agent_private *agent_priv,
1299                               u8 mgmt_class)
1300 {
1301         struct ib_mad_port_private *port_priv;
1302         struct ib_mad_mgmt_class_table **class;
1303         struct ib_mad_mgmt_method_table **method;
1304         int i, ret;
1305
1306         port_priv = agent_priv->qp_info->port_priv;
1307         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1308         if (!*class) {
1309                 /* Allocate management class table for "new" class version */
1310                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1311                 if (!*class) {
1312                         printk(KERN_ERR PFX "No memory for "
1313                                "ib_mad_mgmt_class_table\n");
1314                         ret = -ENOMEM;
1315                         goto error1;
1316                 }
1317
1318                 /* Allocate method table for this management class */
1319                 method = &(*class)->method_table[mgmt_class];
1320                 if ((ret = allocate_method_table(method)))
1321                         goto error2;
1322         } else {
1323                 method = &(*class)->method_table[mgmt_class];
1324                 if (!*method) {
1325                         /* Allocate method table for this management class */
1326                         if ((ret = allocate_method_table(method)))
1327                                 goto error1;
1328                 }
1329         }
1330
1331         /* Now, make sure methods are not already in use */
1332         if (method_in_use(method, mad_reg_req))
1333                 goto error3;
1334
1335         /* Finally, add in methods being registered */
1336         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1337                 (*method)->agent[i] = agent_priv;
1338
1339         return 0;
1340
1341 error3:
1342         /* Remove any methods for this mad agent */
1343         remove_methods_mad_agent(*method, agent_priv);
1344         /* Now, check to see if there are any methods in use */
1345         if (!check_method_table(*method)) {
1346                 /* If not, release management method table */
1347                 kfree(*method);
1348                 *method = NULL;
1349         }
1350         ret = -EINVAL;
1351         goto error1;
1352 error2:
1353         kfree(*class);
1354         *class = NULL;
1355 error1:
1356         return ret;
1357 }
1358
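     /*
      * Vendor classes in range 2 carry a 3-byte OUI, adding one more
      * lookup level:
      *
      *    port_priv->version[class_version].vendor
      *            ->vendor_class[vendor_class_index(mgmt_class)]
      *                    ->method_table[i]
      *
      * where slot i is the one of the MAX_MGMT_OUI oui[] entries that
      * matches the request's OUI.
      */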
1359 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1360                            struct ib_mad_agent_private *agent_priv)
1361 {
1362         struct ib_mad_port_private *port_priv;
1363         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1364         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1365         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1366         struct ib_mad_mgmt_method_table **method;
1367         int i, ret = -ENOMEM;
1368         u8 vclass;
1369
1370         /* "New" vendor (with OUI) class */
1371         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1372         port_priv = agent_priv->qp_info->port_priv;
1373         vendor_table = &port_priv->version[
1374                                 mad_reg_req->mgmt_class_version].vendor;
1375         if (!*vendor_table) {
1376                 /* Allocate mgmt vendor class table for "new" class version */
1377                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1378                 if (!vendor) {
1379                         printk(KERN_ERR PFX "No memory for "
1380                                "ib_mad_mgmt_vendor_class_table\n");
1381                         goto error1;
1382                 }
1383
1384                 *vendor_table = vendor;
1385         }
1386         if (!(*vendor_table)->vendor_class[vclass]) {
1387                 /* Allocate table for this management vendor class */
1388                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1389                 if (!vendor_class) {
1390                         printk(KERN_ERR PFX "No memory for "
1391                                "ib_mad_mgmt_vendor_class\n");
1392                         goto error2;
1393                 }
1394
1395                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1396         }
1397         for (i = 0; i < MAX_MGMT_OUI; i++) {
1398                 /* Is there a matching OUI for this vendor class? */
1399                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1400                             mad_reg_req->oui, 3)) {
1401                         method = &(*vendor_table)->vendor_class[
1402                                                 vclass]->method_table[i];
1403                         BUG_ON(!*method);
1404                         goto check_in_use;
1405                 }
1406         }
1407         for (i = 0; i < MAX_MGMT_OUI; i++) {
1408                 /* Is this OUI slot available? */
1409                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1410                                 vclass]->oui[i])) {
1411                         method = &(*vendor_table)->vendor_class[
1412                                 vclass]->method_table[i];
1413                         BUG_ON(*method);
1414                         /* Allocate method table for this OUI */
1415                         if ((ret = allocate_method_table(method)))
1416                                 goto error3;
1417                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1418                                mad_reg_req->oui, 3);
1419                         goto check_in_use;
1420                 }
1421         }
1422         printk(KERN_ERR PFX "All OUI slots in use\n");
1423         goto error3;
1424
1425 check_in_use:
1426         /* Now, make sure methods are not already in use */
1427         if (method_in_use(method, mad_reg_req))
1428                 goto error4;
1429
1430         /* Finally, add in methods being registered */
1431         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1432                 (*method)->agent[i] = agent_priv;
1433
1434         return 0;
1435
1436 error4:
1437         /* Remove any methods for this mad agent */
1438         remove_methods_mad_agent(*method, agent_priv);
1439         /* Now, check to see if there are any methods in use */
1440         if (!check_method_table(*method)) {
1441                 /* If not, release management method table */
1442                 kfree(*method);
1443                 *method = NULL;
1444         }
1445         ret = -EINVAL;
1446 error3:
1447         if (vendor_class) {
1448                 (*vendor_table)->vendor_class[vclass] = NULL;
1449                 kfree(vendor_class);
1450         }
1451 error2:
1452         if (vendor) {
1453                 *vendor_table = NULL;
1454                 kfree(vendor);
1455         }
1456 error1:
1457         return ret;
1458 }
1459
1460 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1461 {
1462         struct ib_mad_port_private *port_priv;
1463         struct ib_mad_mgmt_class_table *class;
1464         struct ib_mad_mgmt_method_table *method;
1465         struct ib_mad_mgmt_vendor_class_table *vendor;
1466         struct ib_mad_mgmt_vendor_class *vendor_class;
1467         int index;
1468         u8 mgmt_class;
1469
1470         /*
1471          * Was a MAD registration request supplied
1472          * with the original registration?
1473          */
1474         if (!agent_priv->reg_req)
1475                 goto out;
1477
1478         port_priv = agent_priv->qp_info->port_priv;
1479         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1480         class = port_priv->version[
1481                         agent_priv->reg_req->mgmt_class_version].class;
1482         if (!class)
1483                 goto vendor_check;
1484
1485         method = class->method_table[mgmt_class];
1486         if (method) {
1487                 /* Remove any methods for this mad agent */
1488                 remove_methods_mad_agent(method, agent_priv);
1489                 /* Now, check to see if there are any methods still in use */
1490                 if (!check_method_table(method)) {
1491                         /* If not, release management method table */
1492                         kfree(method);
1493                         class->method_table[mgmt_class] = NULL;
1494                         /* Any management classes left? */
1495                         if (!check_class_table(class)) {
1496                                 /* If not, release management class table */
1497                                 kfree(class);
1498                                 port_priv->version[
1499                                         agent_priv->reg_req->
1500                                         mgmt_class_version].class = NULL;
1501                         }
1502                 }
1503         }
1504
1505 vendor_check:
1506         if (!is_vendor_class(mgmt_class))
1507                 goto out;
1508
1509         /* normalize mgmt_class to vendor range 2 */
1510         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1511         vendor = port_priv->version[
1512                         agent_priv->reg_req->mgmt_class_version].vendor;
1513
1514         if (!vendor)
1515                 goto out;
1516
1517         vendor_class = vendor->vendor_class[mgmt_class];
1518         if (vendor_class) {
1519                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1520                 if (index < 0)
1521                         goto out;
1522                 method = vendor_class->method_table[index];
1523                 if (method) {
1524                         /* Remove any methods for this mad agent */
1525                         remove_methods_mad_agent(method, agent_priv);
1526                         /*
1527                          * Now, check to see if there are
1528                          * any methods still in use
1529                          */
1530                         if (!check_method_table(method)) {
1531                                 /* If not, release management method table */
1532                                 kfree(method);
1533                                 vendor_class->method_table[index] = NULL;
1534                                 memset(vendor_class->oui[index], 0, 3);
1535                                 /* Any OUIs left? */
1536                                 if (!check_vendor_class(vendor_class)) {
1537                                         /* If not, release vendor class table */
1538                                         kfree(vendor_class);
1539                                         vendor->vendor_class[mgmt_class] = NULL;
1540                                         /* Any other vendor classes left? */
1541                                         if (!check_vendor_table(vendor)) {
1542                                                 kfree(vendor);
1543                                                 port_priv->version[
1544                                                         agent_priv->reg_req->
1545                                                         mgmt_class_version].
1546                                                         vendor = NULL;
1547                                         }
1548                                 }
1549                         }
1550                 }
1551         }
1552
1553 out:
1554         return;
1555 }
1556
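     /*
      * Example of the TID-based routing done below: an agent assigned
      * hi_tid 0x12 sends requests with tid = (0x12ULL << 32) | user_tid.
      * The responder echoes the TID, so be64_to_cpu(tid) >> 32 == 0x12
      * steers the response back to that agent.  Requests, by contrast,
      * are routed through the class/method (and OUI) tables built above.
      */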
1557 static struct ib_mad_agent_private *
1558 find_mad_agent(struct ib_mad_port_private *port_priv,
1559                struct ib_mad *mad)
1560 {
1561         struct ib_mad_agent_private *mad_agent = NULL;
1562         unsigned long flags;
1563
1564         spin_lock_irqsave(&port_priv->reg_lock, flags);
1565         if (ib_response_mad(mad)) {
1566                 u32 hi_tid;
1567                 struct ib_mad_agent_private *entry;
1568
1569                 /*
1570                  * Responses are routed based on the high 32 bits of
1571                  * the MAD's transaction ID.
1572                  */
1573                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1574                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1575                         if (entry->agent.hi_tid == hi_tid) {
1576                                 mad_agent = entry;
1577                                 break;
1578                         }
1579                 }
1580         } else {
1581                 struct ib_mad_mgmt_class_table *class;
1582                 struct ib_mad_mgmt_method_table *method;
1583                 struct ib_mad_mgmt_vendor_class_table *vendor;
1584                 struct ib_mad_mgmt_vendor_class *vendor_class;
1585                 struct ib_vendor_mad *vendor_mad;
1586                 int index;
1587
1588                 /*
1589                  * Requests are routed based on version, class, and
1590                  * method; for "newer" vendor MADs, also on OUI.
1591                  */
1592                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1593                         goto out;
1594                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1595                         class = port_priv->version[
1596                                         mad->mad_hdr.class_version].class;
1597                         if (!class)
1598                                 goto out;
1599                         method = class->method_table[convert_mgmt_class(
1600                                                         mad->mad_hdr.mgmt_class)];
1601                         if (method)
1602                                 mad_agent = method->agent[mad->mad_hdr.method &
1603                                                           ~IB_MGMT_METHOD_RESP];
1604                 } else {
1605                         vendor = port_priv->version[
1606                                         mad->mad_hdr.class_version].vendor;
1607                         if (!vendor)
1608                                 goto out;
1609                         vendor_class = vendor->vendor_class[vendor_class_index(
1610                                                 mad->mad_hdr.mgmt_class)];
1611                         if (!vendor_class)
1612                                 goto out;
1613                         /* Find matching OUI */
1614                         vendor_mad = (struct ib_vendor_mad *)mad;
1615                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1616                         if (index == -1)
1617                                 goto out;
1618                         method = vendor_class->method_table[index];
1619                         if (method) {
1620                                 mad_agent = method->agent[mad->mad_hdr.method &
1621                                                           ~IB_MGMT_METHOD_RESP];
1622                         }
1623                 }
1624         }
1625
1626         if (mad_agent) {
1627                 if (mad_agent->agent.recv_handler)
1628                         atomic_inc(&mad_agent->refcount);
1629                 else {
1630                         printk(KERN_NOTICE PFX "No receive handler for client "
1631                                "%p on port %d\n",
1632                                &mad_agent->agent, port_priv->port_num);
1633                         mad_agent = NULL;
1634                 }
1635         }
1636 out:
1637         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1638
1639         return mad_agent;
1640 }
1641
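     /*
      * SMPs (the LID-routed and directed-route subnet management
      * classes) are only valid on QP0; all other classes are GSI
      * traffic and only valid on QP1.
      */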
1642 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1643 {
1644         int valid = 0;
1645
1646         /* Make sure MAD base version is understood */
1647         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1648                 printk(KERN_ERR PFX "MAD received with unsupported base "
1649                        "version %d\n", mad->mad_hdr.base_version);
1650                 goto out;
1651         }
1652
1653         /* Filter SMI packets sent to other than QP0 */
1654         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1655             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1656                 if (qp_num == 0)
1657                         valid = 1;
1658         } else {
1659                 /* Filter GSI packets sent to QP0 */
1660                 if (qp_num != 0)
1661                         valid = 1;
1662         }
1663
1664 out:
1665         return valid;
1666 }
1667
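     /*
      * A MAD counts as "data" here unless the agent speaks RMPP and the
      * MAD is an active RMPP segment of a non-DATA type (e.g. ACK, STOP
      * or ABORT); only data MADs can match an outstanding request.
      */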
1668 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1669                        struct ib_mad_hdr *mad_hdr)
1670 {
1671         struct ib_rmpp_mad *rmpp_mad;
1672
1673         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1674         return !mad_agent_priv->agent.rmpp_version ||
1675                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1676                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1677                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1678 }
1679
1680 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1681                                      struct ib_mad_recv_wc *rwc)
1682 {
1683         return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1684                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1685 }
1686
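     /*
      * LMC sketch for the path-bits check below: with lmc == 2 a port
      * answers to four LIDs, and the low two path bits identify the
      * path, so the send and receive path bits must agree in exactly
      * those bits, hence the ((1 << lmc) - 1) mask.
      */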
1687 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1688                                    struct ib_mad_send_wr_private *wr,
1689                                    struct ib_mad_recv_wc *rwc)
1690 {
1691         struct ib_ah_attr attr;
1692         u8 send_resp, rcv_resp;
1693         union ib_gid sgid;
1694         struct ib_device *device = mad_agent_priv->agent.device;
1695         u8 port_num = mad_agent_priv->agent.port_num;
1696         u8 lmc;
1697
1698         send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1699         rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1700
1701         if (send_resp == rcv_resp)
1702                 /* both requests or both responses: treat GIDs as different */
1703                 return 0;
1704
1705         if (ib_query_ah(wr->send_buf.ah, &attr))
1706                 /* Assume not equal, to avoid false positives. */
1707                 return 0;
1708
1709         if (!!(attr.ah_flags & IB_AH_GRH) !=
1710             !!(rwc->wc->wc_flags & IB_WC_GRH))
1711                 /* one has GID, other does not.  Assume different */
1712                 return 0;
1713
1714         if (!send_resp && rcv_resp) {
1715                 /* sent a request, received a response */
1716                 if (!(attr.ah_flags & IB_AH_GRH)) {
1717                         if (ib_get_cached_lmc(device, port_num, &lmc))
1718                                 return 0;
1719                         return (!lmc || !((attr.src_path_bits ^
1720                                            rwc->wc->dlid_path_bits) &
1721                                           ((1 << lmc) - 1)));
1722                 } else {
1723                         if (ib_get_cached_gid(device, port_num,
1724                                               attr.grh.sgid_index, &sgid))
1725                                 return 0;
1726                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1727                                        16);
1728                 }
1729         }
1730
1731         if (!(attr.ah_flags & IB_AH_GRH))
1732                 return attr.dlid == rwc->wc->slid;
1733         else
1734                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1735                                16);
1736 }
1737
1738 static inline int is_direct(u8 class)
1739 {
1740         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1741 }
1742
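     /*
      * A response matches a queued send if the TIDs and management
      * classes agree and, except for directed-route SMPs (which may
      * carry permissive LIDs), rcv_has_same_gid() accepts the pair.
      */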
1743 struct ib_mad_send_wr_private*
1744 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1745                  struct ib_mad_recv_wc *wc)
1746 {
1747         struct ib_mad_send_wr_private *wr;
1748         struct ib_mad *mad;
1749
1750         mad = (struct ib_mad *)wc->recv_buf.mad;
1751
1752         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1753                 if ((wr->tid == mad->mad_hdr.tid) &&
1754                     rcv_has_same_class(wr, wc) &&
1755                     /*
1756                      * Don't check GID for direct routed MADs.
1757                      * These might have permissive LIDs.
1758                      */
1759                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1760                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1761                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1762         }
1763
1764         /*
1765          * It's possible to receive the response before we've
1766          * been notified that the send has completed
1767          */
1768         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1769                 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1770                     wr->tid == mad->mad_hdr.tid &&
1771                     wr->timeout &&
1772                     rcv_has_same_class(wr, wc) &&
1773                     /*
1774                      * Don't check GID for direct routed MADs.
1775                      * These might have permissive LIDs.
1776                      */
1777                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1778                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1779                         /* Verify request has not been canceled */
1780                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1781         }
1782         return NULL;
1783 }
1784
1785 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1786 {
1787         mad_send_wr->timeout = 0;
1788         if (mad_send_wr->refcount == 1)
1789                 list_move_tail(&mad_send_wr->agent_list,
1790                               &mad_send_wr->mad_agent_priv->done_list);
1791 }
1792
1793 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1794                                  struct ib_mad_recv_wc *mad_recv_wc)
1795 {
1796         struct ib_mad_send_wr_private *mad_send_wr;
1797         struct ib_mad_send_wc mad_send_wc;
1798         unsigned long flags;
1799
1800         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1801         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1802         if (mad_agent_priv->agent.rmpp_version) {
1803                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1804                                                       mad_recv_wc);
1805                 if (!mad_recv_wc) {
1806                         deref_mad_agent(mad_agent_priv);
1807                         return;
1808                 }
1809         }
1810
1811         /* Complete corresponding request */
1812         if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1813                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1814                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1815                 if (!mad_send_wr) {
1816                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1817                         ib_free_recv_mad(mad_recv_wc);
1818                         deref_mad_agent(mad_agent_priv);
1819                         return;
1820                 }
1821                 ib_mark_mad_done(mad_send_wr);
1822                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1823
1824                 /* Defined behavior is to complete response before request */
1825                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1826                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1827                                                    mad_recv_wc);
1828                 atomic_dec(&mad_agent_priv->refcount);
1829
1830                 mad_send_wc.status = IB_WC_SUCCESS;
1831                 mad_send_wc.vendor_err = 0;
1832                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1833                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1834         } else {
1835                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1836                                                    mad_recv_wc);
1837                 deref_mad_agent(mad_agent_priv);
1838         }
1839 }
1840
1841 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1842                                      struct ib_wc *wc)
1843 {
1844         struct ib_mad_qp_info *qp_info;
1845         struct ib_mad_private_header *mad_priv_hdr;
1846         struct ib_mad_private *recv, *response = NULL;
1847         struct ib_mad_list_head *mad_list;
1848         struct ib_mad_agent_private *mad_agent;
1849         int port_num;
1850
1851         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1852         qp_info = mad_list->mad_queue->qp_info;
1853         dequeue_mad(mad_list);
1854
1855         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1856                                     mad_list);
1857         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1858         ib_dma_unmap_single(port_priv->device,
1859                             recv->header.mapping,
1860                             sizeof(struct ib_mad_private) -
1861                               sizeof(struct ib_mad_private_header),
1862                             DMA_FROM_DEVICE);
1863
1864         /* Set up the MAD receive work completion from the "normal" work completion */
1865         recv->header.wc = *wc;
1866         recv->header.recv_wc.wc = &recv->header.wc;
1867         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1868         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1869         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1870
1871         if (atomic_read(&qp_info->snoop_count))
1872                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1873
1874         /* Validate MAD */
1875         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1876                 goto out;
1877
1878         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1879         if (!response) {
1880                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1881                        "for response buffer\n");
1882                 goto out;
1883         }
1884
1885         if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1886                 port_num = wc->port_num;
1887         else
1888                 port_num = port_priv->port_num;
1889
1890         if (recv->mad.mad.mad_hdr.mgmt_class ==
1891             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1892                 enum smi_forward_action retsmi;
1893
1894                 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1895                                            port_priv->device->node_type,
1896                                            port_num,
1897                                            port_priv->device->phys_port_cnt) ==
1898                                            IB_SMI_DISCARD)
1899                         goto out;
1900
1901                 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1902                 if (retsmi == IB_SMI_LOCAL)
1903                         goto local;
1904
1905                 if (retsmi == IB_SMI_SEND) { /* don't forward */
1906                         if (smi_handle_dr_smp_send(&recv->mad.smp,
1907                                                    port_priv->device->node_type,
1908                                                    port_num) == IB_SMI_DISCARD)
1909                                 goto out;
1910
1911                         if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1912                                 goto out;
1913                 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1914                         /* forward case for switches */
1915                         memcpy(response, recv, sizeof(*response));
1916                         response->header.recv_wc.wc = &response->header.wc;
1917                         response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1918                         response->header.recv_wc.recv_buf.grh = &response->grh;
1919
1920                         agent_send_response(&response->mad.mad,
1921                                             &response->grh, wc,
1922                                             port_priv->device,
1923                                             smi_get_fwd_port(&recv->mad.smp),
1924                                             qp_info->qp->qp_num);
1925
1926                         goto out;
1927                 }
1928         }
1929
1930 local:
1931         /* Give driver "right of first refusal" on incoming MAD */
1932         if (port_priv->device->process_mad) {
1933                 int ret;
1934
1935                 ret = port_priv->device->process_mad(port_priv->device, 0,
1936                                                      port_priv->port_num,
1937                                                      wc, &recv->grh,
1938                                                      &recv->mad.mad,
1939                                                      &response->mad.mad);
1940                 if (ret & IB_MAD_RESULT_SUCCESS) {
1941                         if (ret & IB_MAD_RESULT_CONSUMED)
1942                                 goto out;
1943                         if (ret & IB_MAD_RESULT_REPLY) {
1944                                 agent_send_response(&response->mad.mad,
1945                                                     &recv->grh, wc,
1946                                                     port_priv->device,
1947                                                     port_num,
1948                                                     qp_info->qp->qp_num);
1949                                 goto out;
1950                         }
1951                 }
1952         }
1953
1954         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1955         if (mad_agent) {
1956                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1957                 /*
1958                  * ib_mad_complete_recv() now owns recv: it frees it
1959                  * on error or hands it to the client's recv_handler
1960                  */
1961                 recv = NULL;
1962         }
1963
1964 out:
1965         /* Post another receive request for this QP */
1966         if (response) {
1967                 ib_mad_post_receive_mads(qp_info, response);
1968                 if (recv)
1969                         kmem_cache_free(ib_mad_cache, recv);
1970         } else
1971                 ib_mad_post_receive_mads(qp_info, recv);
1972 }
1973
1974 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1975 {
1976         struct ib_mad_send_wr_private *mad_send_wr;
1977         unsigned long delay;
1978
1979         if (list_empty(&mad_agent_priv->wait_list)) {
1980                 __cancel_delayed_work(&mad_agent_priv->timed_work);
1981         } else {
1982                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1983                                          struct ib_mad_send_wr_private,
1984                                          agent_list);
1985
1986                 if (time_after(mad_agent_priv->timeout,
1987                                mad_send_wr->timeout)) {
1988                         mad_agent_priv->timeout = mad_send_wr->timeout;
1989                         __cancel_delayed_work(&mad_agent_priv->timed_work);
1990                         delay = mad_send_wr->timeout - jiffies;
1991                         if ((long)delay <= 0)
1992                                 delay = 1;
1993                         queue_delayed_work(mad_agent_priv->qp_info->
1994                                            port_priv->wq,
1995                                            &mad_agent_priv->timed_work, delay);
1996                 }
1997         }
1998 }
1999
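     /*
      * The wait list is kept sorted by absolute timeout, earliest
      * first.  Insertion scans from the tail, since a newly armed
      * timeout is usually the latest, and the delayed work item only
      * ever needs to look at the list head.
      */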
2000 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2001 {
2002         struct ib_mad_agent_private *mad_agent_priv;
2003         struct ib_mad_send_wr_private *temp_mad_send_wr;
2004         struct list_head *list_item;
2005         unsigned long delay;
2006
2007         mad_agent_priv = mad_send_wr->mad_agent_priv;
2008         list_del(&mad_send_wr->agent_list);
2009
2010         delay = mad_send_wr->timeout;
2011         mad_send_wr->timeout += jiffies;
2012
2013         if (delay) {
2014                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2015                         temp_mad_send_wr = list_entry(list_item,
2016                                                 struct ib_mad_send_wr_private,
2017                                                 agent_list);
2018                         if (time_after(mad_send_wr->timeout,
2019                                        temp_mad_send_wr->timeout))
2020                                 break;
2021                 }
2022         } else
2023                 list_item = &mad_agent_priv->wait_list;
2025         list_add(&mad_send_wr->agent_list, list_item);
2026
2027         /* Reschedule a work item if we have a shorter timeout */
2028         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2029                 __cancel_delayed_work(&mad_agent_priv->timed_work);
2030                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2031                                    &mad_agent_priv->timed_work, delay);
2032         }
2033 }
2034
2035 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2036                           int timeout_ms)
2037 {
2038         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2039         wait_for_response(mad_send_wr);
2040 }
2041
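     /*
      * Send WR refcounting, as used below: a request expecting a
      * response holds one reference for the send completion and one for
      * the response.  Dropping to a refcount of 1 with a timeout still
      * armed means the send completed and only the response is now
      * outstanding.
      */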
2042 /*
2043  * Process a send work completion
2044  */
2045 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2046                              struct ib_mad_send_wc *mad_send_wc)
2047 {
2048         struct ib_mad_agent_private     *mad_agent_priv;
2049         unsigned long                   flags;
2050         int                             ret;
2051
2052         mad_agent_priv = mad_send_wr->mad_agent_priv;
2053         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2054         if (mad_agent_priv->agent.rmpp_version) {
2055                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2056                 if (ret == IB_RMPP_RESULT_CONSUMED)
2057                         goto done;
2058         } else
2059                 ret = IB_RMPP_RESULT_UNHANDLED;
2060
2061         if (mad_send_wc->status != IB_WC_SUCCESS &&
2062             mad_send_wr->status == IB_WC_SUCCESS) {
2063                 mad_send_wr->status = mad_send_wc->status;
2064                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2065         }
2066
2067         if (--mad_send_wr->refcount > 0) {
2068                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2069                     mad_send_wr->status == IB_WC_SUCCESS) {
2070                         wait_for_response(mad_send_wr);
2071                 }
2072                 goto done;
2073         }
2074
2075         /* Remove send from MAD agent and notify client of completion */
2076         list_del(&mad_send_wr->agent_list);
2077         adjust_timeout(mad_agent_priv);
2078         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2079
2080         if (mad_send_wr->status != IB_WC_SUCCESS)
2081                 mad_send_wc->status = mad_send_wr->status;
2082         if (ret == IB_RMPP_RESULT_INTERNAL)
2083                 ib_rmpp_send_handler(mad_send_wc);
2084         else
2085                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2086                                                    mad_send_wc);
2087
2088         /* Release reference on agent taken when sending */
2089         deref_mad_agent(mad_agent_priv);
2090         return;
2091 done:
2092         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2093 }
2094
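     /*
      * Sends beyond the QP's max_active limit wait on
      * qp_info->overflow_list; each completion below retires one WR
      * and, if the queue had overflowed, moves the oldest overflow
      * entry onto the send queue and posts it.
      */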
2095 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2096                                      struct ib_wc *wc)
2097 {
2098         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2099         struct ib_mad_list_head         *mad_list;
2100         struct ib_mad_qp_info           *qp_info;
2101         struct ib_mad_queue             *send_queue;
2102         struct ib_send_wr               *bad_send_wr;
2103         struct ib_mad_send_wc           mad_send_wc;
2104         unsigned long flags;
2105         int ret;
2106
2107         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2108         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2109                                    mad_list);
2110         send_queue = mad_list->mad_queue;
2111         qp_info = send_queue->qp_info;
2112
2113 retry:
2114         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2115                             mad_send_wr->header_mapping,
2116                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2117         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2118                             mad_send_wr->payload_mapping,
2119                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2120         queued_send_wr = NULL;
2121         spin_lock_irqsave(&send_queue->lock, flags);
2122         list_del(&mad_list->list);
2123
2124         /* Move queued send to the send queue */
2125         if (send_queue->count-- > send_queue->max_active) {
2126                 mad_list = container_of(qp_info->overflow_list.next,
2127                                         struct ib_mad_list_head, list);
2128                 queued_send_wr = container_of(mad_list,
2129                                         struct ib_mad_send_wr_private,
2130                                         mad_list);
2131                 list_move_tail(&mad_list->list, &send_queue->list);
2132         }
2133         spin_unlock_irqrestore(&send_queue->lock, flags);
2134
2135         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2136         mad_send_wc.status = wc->status;
2137         mad_send_wc.vendor_err = wc->vendor_err;
2138         if (atomic_read(&qp_info->snoop_count))
2139                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2140                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2141         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2142
2143         if (queued_send_wr) {
2144                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2145                                    &bad_send_wr);
2146                 if (ret) {
2147                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2148                         mad_send_wr = queued_send_wr;
2149                         wc->status = IB_WC_LOC_QP_OP_ERR;
2150                         goto retry;
2151                 }
2152         }
2153 }
2154
2155 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2156 {
2157         struct ib_mad_send_wr_private *mad_send_wr;
2158         struct ib_mad_list_head *mad_list;
2159         unsigned long flags;
2160
2161         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2162         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2163                 mad_send_wr = container_of(mad_list,
2164                                            struct ib_mad_send_wr_private,
2165                                            mad_list);
2166                 mad_send_wr->retry = 1;
2167         }
2168         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2169 }
2170
2171 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2172                               struct ib_wc *wc)
2173 {
2174         struct ib_mad_list_head *mad_list;
2175         struct ib_mad_qp_info *qp_info;
2176         struct ib_mad_send_wr_private *mad_send_wr;
2177         int ret;
2178
2179         /* Determine if failure was a send or receive */
2180         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2181         qp_info = mad_list->mad_queue->qp_info;
2182         if (mad_list->mad_queue == &qp_info->recv_queue)
2183                 /*
2184                  * Receive errors indicate that the QP has entered the error
2185                  * state - error handling/shutdown code will cleanup
2186                  */
2187                 return;
2188
2189         /*
2190          * Send errors will transition the QP to SQE - move
2191          * QP to RTS and repost flushed work requests
2192          */
2193         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2194                                    mad_list);
2195         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2196                 if (mad_send_wr->retry) {
2197                         /* Repost send */
2198                         struct ib_send_wr *bad_send_wr;
2199
2200                         mad_send_wr->retry = 0;
2201                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2202                                         &bad_send_wr);
2203                         if (ret)
2204                                 ib_mad_send_done_handler(port_priv, wc);
2205                 } else
2206                         ib_mad_send_done_handler(port_priv, wc);
2207         } else {
2208                 struct ib_qp_attr *attr;
2209
2210                 /* Transition QP to RTS and fail offending send */
2211                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2212                 if (attr) {
2213                         attr->qp_state = IB_QPS_RTS;
2214                         attr->cur_qp_state = IB_QPS_SQE;
2215                         ret = ib_modify_qp(qp_info->qp, attr,
2216                                            IB_QP_STATE | IB_QP_CUR_STATE);
2217                         kfree(attr);
2218                         if (ret)
2219                                 printk(KERN_ERR PFX "mad_error_handler - "
2220                                        "ib_modify_qp to RTS : %d\n", ret);
2221                         else
2222                                 mark_sends_for_retry(qp_info);
2223                 }
2224                 ib_mad_send_done_handler(port_priv, wc);
2225         }
2226 }
2227
2228 /*
2229  * IB MAD completion callback
2230  */
2231 static void ib_mad_completion_handler(struct work_struct *work)
2232 {
2233         struct ib_mad_port_private *port_priv;
2234         struct ib_wc wc;
2235
2236         port_priv = container_of(work, struct ib_mad_port_private, work);
2237         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2238
2239         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2240                 if (wc.status == IB_WC_SUCCESS) {
2241                         switch (wc.opcode) {
2242                         case IB_WC_SEND:
2243                                 ib_mad_send_done_handler(port_priv, &wc);
2244                                 break;
2245                         case IB_WC_RECV:
2246                                 ib_mad_recv_done_handler(port_priv, &wc);
2247                                 break;
2248                         default:
2249                                 BUG();
2250                                 break;
2251                         }
2252                 } else
2253                         mad_error_handler(port_priv, &wc);
2254         }
2255 }
2256
2257 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2258 {
2259         unsigned long flags;
2260         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2261         struct ib_mad_send_wc mad_send_wc;
2262         struct list_head cancel_list;
2263
2264         INIT_LIST_HEAD(&cancel_list);
2265
2266         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2267         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2268                                  &mad_agent_priv->send_list, agent_list) {
2269                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2270                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2271                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2272                 }
2273         }
2274
2275         /* Empty wait list to prevent receives from finding a request */
2276         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2277         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2278
2279         /* Report all cancelled requests */
2280         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2281         mad_send_wc.vendor_err = 0;
2282
2283         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2284                                  &cancel_list, agent_list) {
2285                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2286                 list_del(&mad_send_wr->agent_list);
2287                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2288                                                    &mad_send_wc);
2289                 atomic_dec(&mad_agent_priv->refcount);
2290         }
2291 }
2292
2293 static struct ib_mad_send_wr_private*
2294 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2295              struct ib_mad_send_buf *send_buf)
2296 {
2297         struct ib_mad_send_wr_private *mad_send_wr;
2298
2299         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2300                             agent_list) {
2301                 if (&mad_send_wr->send_buf == send_buf)
2302                         return mad_send_wr;
2303         }
2304
2305         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2306                             agent_list) {
2307                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2308                     &mad_send_wr->send_buf == send_buf)
2309                         return mad_send_wr;
2310         }
2311         return NULL;
2312 }
2313
2314 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2315                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2316 {
2317         struct ib_mad_agent_private *mad_agent_priv;
2318         struct ib_mad_send_wr_private *mad_send_wr;
2319         unsigned long flags;
2320         int active;
2321
2322         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2323                                       agent);
2324         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2325         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2326         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2327                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2328                 return -EINVAL;
2329         }
2330
2331         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2332         if (!timeout_ms) {
2333                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2334                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2335         }
2336
2337         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2338         if (active)
2339                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2340         else
2341                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2342
2343         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2344         return 0;
2345 }
2346 EXPORT_SYMBOL(ib_modify_mad);
2347
2348 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2349                    struct ib_mad_send_buf *send_buf)
2350 {
2351         ib_modify_mad(mad_agent, send_buf, 0);
2352 }
2353 EXPORT_SYMBOL(ib_cancel_mad);
2354
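     /*
      * Illustrative client usage of the two calls above (hypothetical
      * code, not part of this file):
      *
      *    struct ib_mad_send_buf *msg = ...;
      *
      *    ib_post_send_mad(msg, NULL);
      *
      *    To extend the response timeout to 5 seconds:
      *            ib_modify_mad(agent, msg, 5000);
      *
      *    To give up on the response (the send then completes with
      *    status IB_WC_WR_FLUSH_ERR):
      *            ib_cancel_mad(agent, msg);
      */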
2355 static void local_completions(struct work_struct *work)
2356 {
2357         struct ib_mad_agent_private *mad_agent_priv;
2358         struct ib_mad_local_private *local;
2359         struct ib_mad_agent_private *recv_mad_agent;
2360         unsigned long flags;
2361         int free_mad;
2362         struct ib_wc wc;
2363         struct ib_mad_send_wc mad_send_wc;
2364
2365         mad_agent_priv =
2366                 container_of(work, struct ib_mad_agent_private, local_work);
2367
2368         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2369         while (!list_empty(&mad_agent_priv->local_list)) {
2370                 local = list_entry(mad_agent_priv->local_list.next,
2371                                    struct ib_mad_local_private,
2372                                    completion_list);
2373                 list_del(&local->completion_list);
2374                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2375                 free_mad = 0;
2376                 if (local->mad_priv) {
2377                         recv_mad_agent = local->recv_mad_agent;
2378                         if (!recv_mad_agent) {
2379                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2380                                 free_mad = 1;
2381                                 goto local_send_completion;
2382                         }
2383
2384                         /*
2385                          * Defined behavior is to complete response
2386                          * before request
2387                          */
2388                         build_smp_wc(recv_mad_agent->agent.qp,
2389                                      (unsigned long) local->mad_send_wr,
2390                                      be16_to_cpu(IB_LID_PERMISSIVE),
2391                                      0, recv_mad_agent->agent.port_num, &wc);
2392
2393                         local->mad_priv->header.recv_wc.wc = &wc;
2394                         local->mad_priv->header.recv_wc.mad_len =
2395                                                 sizeof(struct ib_mad);
2396                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2397                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2398                                  &local->mad_priv->header.recv_wc.rmpp_list);
2399                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2400                         local->mad_priv->header.recv_wc.recv_buf.mad =
2401                                                 &local->mad_priv->mad.mad;
2402                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2403                                 snoop_recv(recv_mad_agent->qp_info,
2404                                            &local->mad_priv->header.recv_wc,
2405                                            IB_MAD_SNOOP_RECVS);
2406                         recv_mad_agent->agent.recv_handler(
2407                                                 &recv_mad_agent->agent,
2408                                                 &local->mad_priv->header.recv_wc);
2409                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2410                         atomic_dec(&recv_mad_agent->refcount);
2411                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2412                 }
2413
2414 local_send_completion:
2415                 /* Complete send */
2416                 mad_send_wc.status = IB_WC_SUCCESS;
2417                 mad_send_wc.vendor_err = 0;
2418                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2419                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2420                         snoop_send(mad_agent_priv->qp_info,
2421                                    &local->mad_send_wr->send_buf,
2422                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2423                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2424                                                    &mad_send_wc);
2425
2426                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2427                 atomic_dec(&mad_agent_priv->refcount);
2428                 if (free_mad)
2429                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2430                 kfree(local);
2431         }
2432         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2433 }
2434
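     /*
      * retry_send() consumes one of the retries granted at send time:
      * retries_left is internal bookkeeping, while send_buf.retries
      * counts retries actually used and is visible to the client on
      * completion.
      */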
2435 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2436 {
2437         int ret;
2438
2439         if (!mad_send_wr->retries_left)
2440                 return -ETIMEDOUT;
2441
2442         mad_send_wr->retries_left--;
2443         mad_send_wr->send_buf.retries++;
2444
2445         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2446
2447         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2448                 ret = ib_retry_rmpp(mad_send_wr);
2449                 switch (ret) {
2450                 case IB_RMPP_RESULT_UNHANDLED:
2451                         ret = ib_send_mad(mad_send_wr);
2452                         break;
2453                 case IB_RMPP_RESULT_CONSUMED:
2454                         ret = 0;
2455                         break;
2456                 default:
2457                         ret = -ECOMM;
2458                         break;
2459                 }
2460         } else
2461                 ret = ib_send_mad(mad_send_wr);
2462
2463         if (!ret) {
2464                 mad_send_wr->refcount++;
2465                 list_add_tail(&mad_send_wr->agent_list,
2466                               &mad_send_wr->mad_agent_priv->send_list);
2467         }
2468         return ret;
2469 }
2470
2471 static void timeout_sends(struct work_struct *work)
2472 {
2473         struct ib_mad_agent_private *mad_agent_priv;
2474         struct ib_mad_send_wr_private *mad_send_wr;
2475         struct ib_mad_send_wc mad_send_wc;
2476         unsigned long flags, delay;
2477
2478         mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2479                                       timed_work.work);
2480         mad_send_wc.vendor_err = 0;
2481
2482         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2483         while (!list_empty(&mad_agent_priv->wait_list)) {
2484                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2485                                          struct ib_mad_send_wr_private,
2486                                          agent_list);
2487
2488                 if (time_after(mad_send_wr->timeout, jiffies)) {
2489                         delay = mad_send_wr->timeout - jiffies;
2490                         if ((long)delay <= 0)
2491                                 delay = 1;
2492                         queue_delayed_work(mad_agent_priv->qp_info->
2493                                            port_priv->wq,
2494                                            &mad_agent_priv->timed_work, delay);
2495                         break;
2496                 }
2497
2498                 list_del(&mad_send_wr->agent_list);
2499                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2500                     !retry_send(mad_send_wr))
2501                         continue;
2502
2503                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2504
2505                 if (mad_send_wr->status == IB_WC_SUCCESS)
2506                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2507                 else
2508                         mad_send_wc.status = mad_send_wr->status;
2509                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2510                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2511                                                    &mad_send_wc);
2512
2513                 atomic_dec(&mad_agent_priv->refcount);
2514                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2515         }
2516         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2517 }
2518
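     /*
      * CQ event callback: all real work is deferred to the port
      * workqueue (ib_mad_completion_handler() above); the port-list
      * check avoids queueing work for a port that is being torn down.
      */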
2519 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2520 {
2521         struct ib_mad_port_private *port_priv = cq->cq_context;
2522         unsigned long flags;
2523
2524         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2525         if (!list_empty(&port_priv->port_list))
2526                 queue_work(port_priv->wq, &port_priv->work);
2527         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2528 }
2529
2530 /*
2531  * Allocate receive MADs and post receive WRs for them
2532  */
2533 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2534                                     struct ib_mad_private *mad)
2535 {
2536         unsigned long flags;
2537         int post, ret;
2538         struct ib_mad_private *mad_priv;
2539         struct ib_sge sg_list;
2540         struct ib_recv_wr recv_wr, *bad_recv_wr;
2541         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2542
2543         /* Initialize common scatter list fields */
2544         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2545         sg_list.lkey = qp_info->port_priv->mr->lkey;
2546
2547         /* Initialize common receive WR fields */
2548         recv_wr.next = NULL;
2549         recv_wr.sg_list = &sg_list;
2550         recv_wr.num_sge = 1;
2551
2552         do {
2553                 /* Allocate and map receive buffer */
2554                 if (mad) {
2555                         mad_priv = mad;
2556                         mad = NULL;
2557                 } else {
2558                         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2559                         if (!mad_priv) {
2560                                 printk(KERN_ERR PFX "No memory for receive buffer\n");
2561                                 ret = -ENOMEM;
2562                                 break;
2563                         }
2564                 }
2565                 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2566                                                  &mad_priv->grh,
2567                                                  sizeof *mad_priv -
2568                                                    sizeof mad_priv->header,
2569                                                  DMA_FROM_DEVICE);
2570                 mad_priv->header.mapping = sg_list.addr;
2571                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2572                 mad_priv->header.mad_list.mad_queue = recv_queue;
2573
2574                 /* Post receive WR */
2575                 spin_lock_irqsave(&recv_queue->lock, flags);
2576                 post = (++recv_queue->count < recv_queue->max_active);
2577                 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2578                 spin_unlock_irqrestore(&recv_queue->lock, flags);
2579                 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2580                 if (ret) {
2581                         spin_lock_irqsave(&recv_queue->lock, flags);
2582                         list_del(&mad_priv->header.mad_list.list);
2583                         recv_queue->count--;
2584                         spin_unlock_irqrestore(&recv_queue->lock, flags);
2585                         ib_dma_unmap_single(qp_info->port_priv->device,
2586                                             mad_priv->header.mapping,
2587                                             sizeof *mad_priv -
2588                                               sizeof mad_priv->header,
2589                                             DMA_FROM_DEVICE);
2590                         kmem_cache_free(ib_mad_cache, mad_priv);
2591                         printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2592                         break;
2593                 }
2594         } while (post);
2595
2596         return ret;
2597 }
2598
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;

        if (!qp_info->qp)
                return;

        while (!list_empty(&qp_info->recv_queue.list)) {

                mad_list = list_entry(qp_info->recv_queue.list.next,
                                      struct ib_mad_list_head, list);
                mad_priv_hdr = container_of(mad_list,
                                            struct ib_mad_private_header,
                                            mad_list);
                recv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);

                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);

                ib_dma_unmap_single(qp_info->port_priv->device,
                                    recv->header.mapping,
                                    sizeof(struct ib_mad_private) -
                                      sizeof(struct ib_mad_private_header),
                                    DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }

        qp_info->recv_queue.count = 0;
}

/*
 * Start the port: transition the MAD QPs to RTS and post the
 * initial set of receive WRs
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                if (!qp)
                        continue;

                /*
                 * PKey index for QP1 is irrelevant but
                 * one is needed for the Reset to Init transition
                 */
                attr->qp_state = IB_QPS_INIT;
                attr->pkey_index = 0;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "INIT: %d\n", i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTR;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "RTR: %d\n", i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTS;
                attr->sq_psn = IB_MAD_SEND_Q_PSN;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't change QP%d state to "
                               "RTS: %d\n", i, ret);
                        goto out;
                }
        }

        ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                printk(KERN_ERR PFX "Failed to request completion "
                       "notification: %d\n", ret);
                goto out;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                if (!port_priv->qp_info[i].qp)
                        continue;

                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        printk(KERN_ERR PFX "Couldn't post receive WRs\n");
                        goto out;
                }
        }
out:
        kfree(attr);
        return ret;
}

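/*
 * QP event handler: a fatal error on a MAD QP is not recoverable
 * here, so the event is only logged.
 */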
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct ib_mad_qp_info   *qp_info = qp_context;

        /* It's worse than that! He's dead, Jim! */
        printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
                event->event, qp_info->qp->qp_num);
}

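/*
 * Initialize a send or receive MAD work queue
 */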
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
                           struct ib_mad_queue *mad_queue)
{
        mad_queue->qp_info = qp_info;
        mad_queue->count = 0;
        spin_lock_init(&mad_queue->lock);
        INIT_LIST_HEAD(&mad_queue->list);
}

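/*
 * Initialize the per-QP state: send/receive queues, overflow list,
 * and snoop bookkeeping
 */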
static void init_mad_qp(struct ib_mad_port_private *port_priv,
                        struct ib_mad_qp_info *qp_info)
{
        qp_info->port_priv = port_priv;
        init_mad_queue(qp_info, &qp_info->send_queue);
        init_mad_queue(qp_info, &qp_info->recv_queue);
        INIT_LIST_HEAD(&qp_info->overflow_list);
        spin_lock_init(&qp_info->snoop_lock);
        qp_info->snoop_table = NULL;
        qp_info->snoop_table_size = 0;
        atomic_set(&qp_info->snoop_count, 0);
}

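/*
 * Create the special (SMI or GSI) QP for a port, sharing the port's
 * CQ for both send and receive completions
 */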
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
        struct ib_qp_init_attr  qp_init_attr;

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.cap.max_send_wr = mad_sendq_size;
        qp_init_attr.cap.max_recv_wr = mad_recvq_size;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
        qp_init_attr.port_num = qp_info->port_priv->port_num;
        qp_init_attr.qp_context = qp_info;
        qp_init_attr.event_handler = qp_event_handler;
        qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
        if (IS_ERR(qp_info->qp)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
                       get_spl_qp_index(qp_type));
                return PTR_ERR(qp_info->qp);
        }
        /* Use minimum queue sizes unless the CQ is resized */
        qp_info->send_queue.max_active = mad_sendq_size;
        qp_info->recv_queue.max_active = mad_recvq_size;
        return 0;
}

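/*
 * Destroy the QP (if it was created) and free the snoop table
 */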
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
        if (!qp_info->qp)
                return;

        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed.  The SMI QP (QP0) is
 * created only when the port's link layer is InfiniBand; RoCE
 * ports carry no QP0 traffic.
 */
static int ib_mad_port_open(struct ib_device *device,
                            int port_num)
{
        int ret, cq_size;
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];
        int has_smi;

        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
                return -ENOMEM;
        }

        port_priv->device = device;
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->reg_lock);
        INIT_LIST_HEAD(&port_priv->agent_list);
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);

        cq_size = mad_sendq_size + mad_recvq_size;
        has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
        if (has_smi)
                cq_size *= 2;

        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, cq_size, 0);
        if (IS_ERR(port_priv->cq)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
                goto error3;
        }

        port_priv->pd = ib_alloc_pd(device);
        if (IS_ERR(port_priv->pd)) {
                printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
                goto error4;
        }

        port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(port_priv->mr)) {
                printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
                ret = PTR_ERR(port_priv->mr);
                goto error5;
        }

        if (has_smi) {
                ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
                if (ret)
                        goto error6;
        }
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;

        snprintf(name, sizeof name, "ib_mad%d", port_num);
        port_priv->wq = create_singlethread_workqueue(name);
        if (!port_priv->wq) {
                ret = -ENOMEM;
                goto error8;
        }
        INIT_WORK(&port_priv->work, ib_mad_completion_handler);

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        ret = ib_mad_port_start(port_priv);
        if (ret) {
                printk(KERN_ERR PFX "Couldn't start port\n");
                goto error9;
        }

        return 0;

error9:
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
error8:
        destroy_mad_qp(&port_priv->qp_info[1]);
error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
error6:
        ib_dereg_mr(port_priv->mr);
error5:
        ib_dealloc_pd(port_priv->pd);
error4:
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
        kfree(port_priv);

        return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        port_priv = __ib_get_mad_port(device, port_num);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
                printk(KERN_ERR PFX "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
        ib_dereg_mr(port_priv->mr);
        ib_dealloc_pd(port_priv->pd);
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */

        kfree(port_priv);

        return 0;
}

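/*
 * Bring up MAD services on every port of a newly registered device.
 * Switches expose management traffic only on port 0; end nodes use
 * ports 1..phys_port_cnt.
 */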
static void ib_mad_init_device(struct ib_device *device)
{
        int start, end, i;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (ib_mad_port_open(device, i)) {
                        printk(KERN_ERR PFX "Couldn't open %s port %d\n",
                               device->name, i);
                        goto error;
                }
                if (ib_agent_port_open(device, i)) {
                        printk(KERN_ERR PFX "Couldn't open %s port %d "
                               "for agents\n",
                               device->name, i);
                        goto error_agent;
                }
        }
        return;

error_agent:
        if (ib_mad_port_close(device, i))
                printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                       device->name, i);

error:
        i--;

        while (i >= start) {
                if (ib_agent_port_close(device, i))
                        printk(KERN_ERR PFX "Couldn't close %s port %d "
                               "for agents\n",
                               device->name, i);
                if (ib_mad_port_close(device, i))
                        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                               device->name, i);
                i--;
        }
}

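/*
 * Tear down MAD services on every port when a device is removed,
 * mirroring the port numbering used in ib_mad_init_device()
 */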
static void ib_mad_remove_device(struct ib_device *device)
{
        int i, num_ports, cur_port;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                num_ports = 1;
                cur_port = 0;
        } else {
                num_ports = device->phys_port_cnt;
                cur_port = 1;
        }
        for (i = 0; i < num_ports; i++, cur_port++) {
                if (ib_agent_port_close(device, cur_port))
                        printk(KERN_ERR PFX "Couldn't close %s port %d "
                               "for agents\n",
                               device->name, cur_port);
                if (ib_mad_port_close(device, cur_port))
                        printk(KERN_ERR PFX "Couldn't close %s port %d\n",
                               device->name, cur_port);
        }
}

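/* Hook MAD setup/teardown into IB device registration and removal */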
static struct ib_client mad_client = {
        .name   = "mad",
        .add = ib_mad_init_device,
        .remove = ib_mad_remove_device
};

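/*
 * Module init: clamp the queue size parameters to the supported
 * range, create the MAD slab cache, and register as an IB client
 */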
static int __init ib_mad_init_module(void)
{
        int ret;

        mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
        mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
                                         NULL);
        if (!ib_mad_cache) {
                printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
                ret = -ENOMEM;
                goto error1;
        }

        INIT_LIST_HEAD(&ib_mad_port_list);

        ret = ib_register_client(&mad_client);
        if (ret) {
                printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
                goto error2;
        }

        return 0;

error2:
        kmem_cache_destroy(ib_mad_cache);
error1:
        return ret;
}

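/*
 * Module exit: unregister from the IB core (closing all ports) and
 * destroy the slab cache
 */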
static void __exit ib_mad_cleanup_module(void)
{
        ib_unregister_client(&mad_client);
        kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);