drivers/infiniband/core/mad.c
1 /*
2  * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
5  * Copyright (c) 2009 HNR Consulting. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  *
35  */
36 #include <linux/dma-mapping.h>
37 #include <rdma/ib_cache.h>
38
39 #include "mad_priv.h"
40 #include "mad_rmpp.h"
41 #include "smi.h"
42 #include "agent.h"
43
44 MODULE_LICENSE("Dual BSD/GPL");
45 MODULE_DESCRIPTION("kernel IB MAD API");
46 MODULE_AUTHOR("Hal Rosenstock");
47 MODULE_AUTHOR("Sean Hefty");
48
49 int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
50 int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
51
52 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
53 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
54 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
55 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
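/*
 * Both queue depths can be overridden when the module is loaded, for
 * example (illustrative invocation):
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 *
 * With permissions 0444 the current values remain visible under
 * /sys/module/ib_mad/parameters/ but cannot be changed at runtime.
 */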
56
57 static struct kmem_cache *ib_mad_cache;
58
59 static struct list_head ib_mad_port_list;
60 static u32 ib_mad_client_id = 0;
61
62 /* Port list lock */
63 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
64
65 /* Forward declarations */
66 static int method_in_use(struct ib_mad_mgmt_method_table **method,
67                          struct ib_mad_reg_req *mad_reg_req);
68 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
69 static struct ib_mad_agent_private *find_mad_agent(
70                                         struct ib_mad_port_private *port_priv,
71                                         struct ib_mad *mad);
72 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
73                                     struct ib_mad_private *mad);
74 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
75 static void timeout_sends(struct work_struct *work);
76 static void local_completions(struct work_struct *work);
77 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
78                               struct ib_mad_agent_private *agent_priv,
79                               u8 mgmt_class);
80 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
81                            struct ib_mad_agent_private *agent_priv);
82
83 /*
84  * Returns an ib_mad_port_private structure or NULL for a device/port.
85  * Assumes ib_mad_port_list_lock is held.
86  */
87 static inline struct ib_mad_port_private *
88 __ib_get_mad_port(struct ib_device *device, int port_num)
89 {
90         struct ib_mad_port_private *entry;
91
92         list_for_each_entry(entry, &ib_mad_port_list, port_list) {
93                 if (entry->device == device && entry->port_num == port_num)
94                         return entry;
95         }
96         return NULL;
97 }
98
99 /*
100  * Wrapper function to return an ib_mad_port_private structure or NULL
101  * for a device/port
102  */
103 static inline struct ib_mad_port_private *
104 ib_get_mad_port(struct ib_device *device, int port_num)
105 {
106         struct ib_mad_port_private *entry;
107         unsigned long flags;
108
109         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
110         entry = __ib_get_mad_port(device, port_num);
111         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
112
113         return entry;
114 }
115
116 static inline u8 convert_mgmt_class(u8 mgmt_class)
117 {
118         /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
119         return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
120                 0 : mgmt_class;
121 }
122
123 static int get_spl_qp_index(enum ib_qp_type qp_type)
124 {
125         switch (qp_type)
126         {
127         case IB_QPT_SMI:
128                 return 0;
129         case IB_QPT_GSI:
130                 return 1;
131         default:
132                 return -1;
133         }
134 }
135
136 static int vendor_class_index(u8 mgmt_class)
137 {
138         return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
139 }
140
141 static int is_vendor_class(u8 mgmt_class)
142 {
143         if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
144             (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
145                 return 0;
146         return 1;
147 }
148
149 static int is_vendor_oui(char *oui)
150 {
151         if (oui[0] || oui[1] || oui[2])
152                 return 1;
153         return 0;
154 }
155
156 static int is_vendor_method_in_use(
157                 struct ib_mad_mgmt_vendor_class *vendor_class,
158                 struct ib_mad_reg_req *mad_reg_req)
159 {
160         struct ib_mad_mgmt_method_table *method;
161         int i;
162
163         for (i = 0; i < MAX_MGMT_OUI; i++) {
164                 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
165                         method = vendor_class->method_table[i];
166                         if (method) {
167                                 if (method_in_use(&method, mad_reg_req))
168                                         return 1;
169                                 else
170                                         break;
171                         }
172                 }
173         }
174         return 0;
175 }
176
177 int ib_response_mad(struct ib_mad *mad)
178 {
179         return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
180                 (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
181                 ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
182                  (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
183 }
184 EXPORT_SYMBOL(ib_response_mad);
185
186 /*
187  * ib_register_mad_agent - Register to send/receive MADs
188  */
189 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
190                                            u8 port_num,
191                                            enum ib_qp_type qp_type,
192                                            struct ib_mad_reg_req *mad_reg_req,
193                                            u8 rmpp_version,
194                                            ib_mad_send_handler send_handler,
195                                            ib_mad_recv_handler recv_handler,
196                                            void *context)
197 {
198         struct ib_mad_port_private *port_priv;
199         struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
200         struct ib_mad_agent_private *mad_agent_priv;
201         struct ib_mad_reg_req *reg_req = NULL;
202         struct ib_mad_mgmt_class_table *class;
203         struct ib_mad_mgmt_vendor_class_table *vendor;
204         struct ib_mad_mgmt_vendor_class *vendor_class;
205         struct ib_mad_mgmt_method_table *method;
206         int ret2, qpn;
207         unsigned long flags;
208         u8 mgmt_class, vclass;
209
210         /* Validate parameters */
211         qpn = get_spl_qp_index(qp_type);
212         if (qpn == -1)
213                 goto error1;
214
215         if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
216                 goto error1;
217
218         /* Validate MAD registration request if supplied */
219         if (mad_reg_req) {
220                 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
221                         goto error1;
222                 if (!recv_handler)
223                         goto error1;
224                 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
225                         /*
226                          * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
227                          * one in this range currently allowed
228                          */
229                         if (mad_reg_req->mgmt_class !=
230                             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
231                                 goto error1;
232                 } else if (mad_reg_req->mgmt_class == 0) {
233                         /*
234                          * Class 0 is reserved in IBA and is used for
235                          * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
236                          */
237                         goto error1;
238                 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
239                         /*
240                          * If class is in "new" vendor range,
241                          * ensure supplied OUI is not zero
242                          */
243                         if (!is_vendor_oui(mad_reg_req->oui))
244                                 goto error1;
245                 }
246                 /* Make sure class supplied is consistent with RMPP */
247                 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
248                         if (rmpp_version)
249                                 goto error1;
250                 }
251                 /* Make sure class supplied is consistent with QP type */
252                 if (qp_type == IB_QPT_SMI) {
253                         if ((mad_reg_req->mgmt_class !=
254                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
255                             (mad_reg_req->mgmt_class !=
256                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
257                                 goto error1;
258                 } else {
259                         if ((mad_reg_req->mgmt_class ==
260                                         IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
261                             (mad_reg_req->mgmt_class ==
262                                         IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
263                                 goto error1;
264                 }
265         } else {
266                 /* No registration request supplied */
267                 if (!send_handler)
268                         goto error1;
269         }
270
271         /* Validate device and port */
272         port_priv = ib_get_mad_port(device, port_num);
273         if (!port_priv) {
274                 ret = ERR_PTR(-ENODEV);
275                 goto error1;
276         }
277
278         /* Allocate structures */
279         mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
280         if (!mad_agent_priv) {
281                 ret = ERR_PTR(-ENOMEM);
282                 goto error1;
283         }
284
285         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
286                                                  IB_ACCESS_LOCAL_WRITE);
287         if (IS_ERR(mad_agent_priv->agent.mr)) {
288                 ret = ERR_PTR(-ENOMEM);
289                 goto error2;
290         }
291
292         if (mad_reg_req) {
293                 reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
294                 if (!reg_req) {
295                         ret = ERR_PTR(-ENOMEM);
296                         goto error3;
297                 }
298                 /* Make a copy of the MAD registration request */
299                 memcpy(reg_req, mad_reg_req, sizeof *reg_req);
300         }
301
302         /* Now, fill in the various structures */
303         mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
304         mad_agent_priv->reg_req = reg_req;
305         mad_agent_priv->agent.rmpp_version = rmpp_version;
306         mad_agent_priv->agent.device = device;
307         mad_agent_priv->agent.recv_handler = recv_handler;
308         mad_agent_priv->agent.send_handler = send_handler;
309         mad_agent_priv->agent.context = context;
310         mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
311         mad_agent_priv->agent.port_num = port_num;
312         spin_lock_init(&mad_agent_priv->lock);
313         INIT_LIST_HEAD(&mad_agent_priv->send_list);
314         INIT_LIST_HEAD(&mad_agent_priv->wait_list);
315         INIT_LIST_HEAD(&mad_agent_priv->done_list);
316         INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
317         INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
318         INIT_LIST_HEAD(&mad_agent_priv->local_list);
319         INIT_WORK(&mad_agent_priv->local_work, local_completions);
320         atomic_set(&mad_agent_priv->refcount, 1);
321         init_completion(&mad_agent_priv->comp);
322
323         spin_lock_irqsave(&port_priv->reg_lock, flags);
324         mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
325
326         /*
327          * Make sure MAD registration (if supplied)
328          * does not overlap with any existing registrations
329          */
330         if (mad_reg_req) {
331                 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
332                 if (!is_vendor_class(mgmt_class)) {
333                         class = port_priv->version[mad_reg_req->
334                                                    mgmt_class_version].class;
335                         if (class) {
336                                 method = class->method_table[mgmt_class];
337                                 if (method) {
338                                         if (method_in_use(&method,
339                                                            mad_reg_req))
340                                                 goto error4;
341                                 }
342                         }
343                         ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
344                                                   mgmt_class);
345                 } else {
346                         /* "New" vendor class range */
347                         vendor = port_priv->version[mad_reg_req->
348                                                     mgmt_class_version].vendor;
349                         if (vendor) {
350                                 vclass = vendor_class_index(mgmt_class);
351                                 vendor_class = vendor->vendor_class[vclass];
352                                 if (vendor_class) {
353                                         if (is_vendor_method_in_use(
354                                                         vendor_class,
355                                                         mad_reg_req))
356                                                 goto error4;
357                                 }
358                         }
359                         ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
360                 }
361                 if (ret2) {
362                         ret = ERR_PTR(ret2);
363                         goto error4;
364                 }
365         }
366
367         /* Add mad agent into port's agent list */
368         list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
369         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
370
371         return &mad_agent_priv->agent;
372
373 error4:
374         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
375         kfree(reg_req);
376 error3:
377         ib_dereg_mr(mad_agent_priv->agent.mr);
378 error2:
379         kfree(mad_agent_priv);
380 error1:
381         return ret;
382 }
383 EXPORT_SYMBOL(ib_register_mad_agent);
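/*
 * Illustrative usage sketch, not part of this file's logic: registering a
 * GSI agent for a class in the "new" vendor range.  The handler names, the
 * OUI bytes and the device/port_num variables below are assumptions by the
 * caller; the request fields are the ones validated above.
 *
 *	static void my_send_done(struct ib_mad_agent *agent,
 *				 struct ib_mad_send_wc *send_wc);
 *	static void my_recv_done(struct ib_mad_agent *agent,
 *				 struct ib_mad_recv_wc *recv_wc);
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class         = IB_MGMT_CLASS_VENDOR_RANGE2_START,
 *		.mgmt_class_version = 1,
 *		.oui                = { 0x00, 0x14, 0x05 },
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_done, my_recv_done, NULL);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * The agent is torn down with ib_unregister_mad_agent(agent), which cancels
 * outstanding sends and waits for callbacks to finish before freeing it.
 */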
384
385 static inline int is_snooping_sends(int mad_snoop_flags)
386 {
387         return (mad_snoop_flags &
388                 (/*IB_MAD_SNOOP_POSTED_SENDS |
389                  IB_MAD_SNOOP_RMPP_SENDS |*/
390                  IB_MAD_SNOOP_SEND_COMPLETIONS /*|
391                  IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
392 }
393
394 static inline int is_snooping_recvs(int mad_snoop_flags)
395 {
396         return (mad_snoop_flags &
397                 (IB_MAD_SNOOP_RECVS /*|
398                  IB_MAD_SNOOP_RMPP_RECVS*/));
399 }
400
401 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
402                                 struct ib_mad_snoop_private *mad_snoop_priv)
403 {
404         struct ib_mad_snoop_private **new_snoop_table;
405         unsigned long flags;
406         int i;
407
408         spin_lock_irqsave(&qp_info->snoop_lock, flags);
409         /* Check for empty slot in array. */
410         for (i = 0; i < qp_info->snoop_table_size; i++)
411                 if (!qp_info->snoop_table[i])
412                         break;
413
414         if (i == qp_info->snoop_table_size) {
415                 /* Grow table. */
416                 new_snoop_table = krealloc(qp_info->snoop_table,
417                                            sizeof mad_snoop_priv *
418                                            (qp_info->snoop_table_size + 1),
419                                            GFP_ATOMIC);
420                 if (!new_snoop_table) {
421                         i = -ENOMEM;
422                         goto out;
423                 }
424
425                 qp_info->snoop_table = new_snoop_table;
426                 qp_info->snoop_table_size++;
427         }
428         qp_info->snoop_table[i] = mad_snoop_priv;
429         atomic_inc(&qp_info->snoop_count);
430 out:
431         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
432         return i;
433 }
434
435 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
436                                            u8 port_num,
437                                            enum ib_qp_type qp_type,
438                                            int mad_snoop_flags,
439                                            ib_mad_snoop_handler snoop_handler,
440                                            ib_mad_recv_handler recv_handler,
441                                            void *context)
442 {
443         struct ib_mad_port_private *port_priv;
444         struct ib_mad_agent *ret;
445         struct ib_mad_snoop_private *mad_snoop_priv;
446         int qpn;
447
448         /* Validate parameters */
449         if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
450             (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
451                 ret = ERR_PTR(-EINVAL);
452                 goto error1;
453         }
454         qpn = get_spl_qp_index(qp_type);
455         if (qpn == -1) {
456                 ret = ERR_PTR(-EINVAL);
457                 goto error1;
458         }
459         port_priv = ib_get_mad_port(device, port_num);
460         if (!port_priv) {
461                 ret = ERR_PTR(-ENODEV);
462                 goto error1;
463         }
464         /* Allocate structures */
465         mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
466         if (!mad_snoop_priv) {
467                 ret = ERR_PTR(-ENOMEM);
468                 goto error1;
469         }
470
471         /* Now, fill in the various structures */
472         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
473         mad_snoop_priv->agent.device = device;
474         mad_snoop_priv->agent.recv_handler = recv_handler;
475         mad_snoop_priv->agent.snoop_handler = snoop_handler;
476         mad_snoop_priv->agent.context = context;
477         mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
478         mad_snoop_priv->agent.port_num = port_num;
479         mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
480         init_completion(&mad_snoop_priv->comp);
481         mad_snoop_priv->snoop_index = register_snoop_agent(
482                                                 &port_priv->qp_info[qpn],
483                                                 mad_snoop_priv);
484         if (mad_snoop_priv->snoop_index < 0) {
485                 ret = ERR_PTR(mad_snoop_priv->snoop_index);
486                 goto error2;
487         }
488
489         atomic_set(&mad_snoop_priv->refcount, 1);
490         return &mad_snoop_priv->agent;
491
492 error2:
493         kfree(mad_snoop_priv);
494 error1:
495         return ret;
496 }
497 EXPORT_SYMBOL(ib_register_mad_snoop);
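/*
 * Illustrative sketch (handler and variable names are assumptions): a snoop
 * agent observing send completions and receives on the GSI QP.  The flag
 * bits are the ones honoured by is_snooping_sends()/is_snooping_recvs()
 * above.
 *
 *	snoop = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS |
 *				      IB_MAD_SNOOP_RECVS,
 *				      my_snoop_send, my_snoop_recv, NULL);
 *	if (IS_ERR(snoop))
 *		return PTR_ERR(snoop);
 */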
498
499 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
500 {
501         if (atomic_dec_and_test(&mad_agent_priv->refcount))
502                 complete(&mad_agent_priv->comp);
503 }
504
505 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
506 {
507         if (atomic_dec_and_test(&mad_snoop_priv->refcount))
508                 complete(&mad_snoop_priv->comp);
509 }
510
511 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
512 {
513         struct ib_mad_port_private *port_priv;
514         unsigned long flags;
515
516         /* Note that we could still be handling received MADs */
517
518         /*
519          * Canceling all sends results in dropping received response
520          * MADs, preventing us from queuing additional work
521          */
522         cancel_mads(mad_agent_priv);
523         port_priv = mad_agent_priv->qp_info->port_priv;
524         cancel_delayed_work(&mad_agent_priv->timed_work);
525
526         spin_lock_irqsave(&port_priv->reg_lock, flags);
527         remove_mad_reg_req(mad_agent_priv);
528         list_del(&mad_agent_priv->agent_list);
529         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
530
531         flush_workqueue(port_priv->wq);
532         ib_cancel_rmpp_recvs(mad_agent_priv);
533
534         deref_mad_agent(mad_agent_priv);
535         wait_for_completion(&mad_agent_priv->comp);
536
537         kfree(mad_agent_priv->reg_req);
538         ib_dereg_mr(mad_agent_priv->agent.mr);
539         kfree(mad_agent_priv);
540 }
541
542 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
543 {
544         struct ib_mad_qp_info *qp_info;
545         unsigned long flags;
546
547         qp_info = mad_snoop_priv->qp_info;
548         spin_lock_irqsave(&qp_info->snoop_lock, flags);
549         qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
550         atomic_dec(&qp_info->snoop_count);
551         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
552
553         deref_snoop_agent(mad_snoop_priv);
554         wait_for_completion(&mad_snoop_priv->comp);
555
556         kfree(mad_snoop_priv);
557 }
558
559 /*
560  * ib_unregister_mad_agent - Unregisters a client from using MAD services
561  */
562 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
563 {
564         struct ib_mad_agent_private *mad_agent_priv;
565         struct ib_mad_snoop_private *mad_snoop_priv;
566
567         /* If the TID is zero, the agent can only snoop. */
568         if (mad_agent->hi_tid) {
569                 mad_agent_priv = container_of(mad_agent,
570                                               struct ib_mad_agent_private,
571                                               agent);
572                 unregister_mad_agent(mad_agent_priv);
573         } else {
574                 mad_snoop_priv = container_of(mad_agent,
575                                               struct ib_mad_snoop_private,
576                                               agent);
577                 unregister_mad_snoop(mad_snoop_priv);
578         }
579         return 0;
580 }
581 EXPORT_SYMBOL(ib_unregister_mad_agent);
582
583 static void dequeue_mad(struct ib_mad_list_head *mad_list)
584 {
585         struct ib_mad_queue *mad_queue;
586         unsigned long flags;
587
588         BUG_ON(!mad_list->mad_queue);
589         mad_queue = mad_list->mad_queue;
590         spin_lock_irqsave(&mad_queue->lock, flags);
591         list_del(&mad_list->list);
592         mad_queue->count--;
593         spin_unlock_irqrestore(&mad_queue->lock, flags);
594 }
595
596 static void snoop_send(struct ib_mad_qp_info *qp_info,
597                        struct ib_mad_send_buf *send_buf,
598                        struct ib_mad_send_wc *mad_send_wc,
599                        int mad_snoop_flags)
600 {
601         struct ib_mad_snoop_private *mad_snoop_priv;
602         unsigned long flags;
603         int i;
604
605         spin_lock_irqsave(&qp_info->snoop_lock, flags);
606         for (i = 0; i < qp_info->snoop_table_size; i++) {
607                 mad_snoop_priv = qp_info->snoop_table[i];
608                 if (!mad_snoop_priv ||
609                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
610                         continue;
611
612                 atomic_inc(&mad_snoop_priv->refcount);
613                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
614                 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
615                                                     send_buf, mad_send_wc);
616                 deref_snoop_agent(mad_snoop_priv);
617                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
618         }
619         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
620 }
621
622 static void snoop_recv(struct ib_mad_qp_info *qp_info,
623                        struct ib_mad_recv_wc *mad_recv_wc,
624                        int mad_snoop_flags)
625 {
626         struct ib_mad_snoop_private *mad_snoop_priv;
627         unsigned long flags;
628         int i;
629
630         spin_lock_irqsave(&qp_info->snoop_lock, flags);
631         for (i = 0; i < qp_info->snoop_table_size; i++) {
632                 mad_snoop_priv = qp_info->snoop_table[i];
633                 if (!mad_snoop_priv ||
634                     !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
635                         continue;
636
637                 atomic_inc(&mad_snoop_priv->refcount);
638                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
639                 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
640                                                    mad_recv_wc);
641                 deref_snoop_agent(mad_snoop_priv);
642                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
643         }
644         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
645 }
646
647 static void build_smp_wc(struct ib_qp *qp,
648                          u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
649                          struct ib_wc *wc)
650 {
651         memset(wc, 0, sizeof *wc);
652         wc->wr_id = wr_id;
653         wc->status = IB_WC_SUCCESS;
654         wc->opcode = IB_WC_RECV;
655         wc->pkey_index = pkey_index;
656         wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
657         wc->src_qp = IB_QP0;
658         wc->qp = qp;
659         wc->slid = slid;
660         wc->sl = 0;
661         wc->dlid_path_bits = 0;
662         wc->port_num = port_num;
663 }
664
665 /*
666  * Return 0 if SMP is to be sent
667  * Return 1 if SMP was consumed locally (whether or not solicited)
668  * Return < 0 if error
669  */
670 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
671                                   struct ib_mad_send_wr_private *mad_send_wr)
672 {
673         int ret = 0;
674         struct ib_smp *smp = mad_send_wr->send_buf.mad;
675         unsigned long flags;
676         struct ib_mad_local_private *local;
677         struct ib_mad_private *mad_priv;
678         struct ib_mad_port_private *port_priv;
679         struct ib_mad_agent_private *recv_mad_agent = NULL;
680         struct ib_device *device = mad_agent_priv->agent.device;
681         u8 port_num;
682         struct ib_wc mad_wc;
683         struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
684
685         if (device->node_type == RDMA_NODE_IB_SWITCH &&
686             smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
687                 port_num = send_wr->wr.ud.port_num;
688         else
689                 port_num = mad_agent_priv->agent.port_num;
690
691         /*
692          * Directed route handling starts if the initial LID routed part of
693          * a request or the ending LID routed part of a response is empty.
694          * If we are at the start of the LID routed part, don't update the
695          * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
696          */
697         if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
698              IB_LID_PERMISSIVE &&
699              smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
700              IB_SMI_DISCARD) {
701                 ret = -EINVAL;
702                 printk(KERN_ERR PFX "Invalid directed route\n");
703                 goto out;
704         }
705
706         /* Check to post send on QP or process locally */
707         if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
708             smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
709                 goto out;
710
711         local = kmalloc(sizeof *local, GFP_ATOMIC);
712         if (!local) {
713                 ret = -ENOMEM;
714                 printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
715                 goto out;
716         }
717         local->mad_priv = NULL;
718         local->recv_mad_agent = NULL;
719         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
720         if (!mad_priv) {
721                 ret = -ENOMEM;
722                 printk(KERN_ERR PFX "No memory for local response MAD\n");
723                 kfree(local);
724                 goto out;
725         }
726
727         build_smp_wc(mad_agent_priv->agent.qp,
728                      send_wr->wr_id, be16_to_cpu(smp->dr_slid),
729                      send_wr->wr.ud.pkey_index,
730                      send_wr->wr.ud.port_num, &mad_wc);
731
732         /* No GRH for DR SMP */
733         ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
734                                   (struct ib_mad *)smp,
735                                   (struct ib_mad *)&mad_priv->mad);
736         switch (ret)
737         {
738         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
739                 if (ib_response_mad(&mad_priv->mad.mad) &&
740                     mad_agent_priv->agent.recv_handler) {
741                         local->mad_priv = mad_priv;
742                         local->recv_mad_agent = mad_agent_priv;
743                         /*
744                          * Reference MAD agent until receive
745                          * side of local completion handled
746                          */
747                         atomic_inc(&mad_agent_priv->refcount);
748                 } else
749                         kmem_cache_free(ib_mad_cache, mad_priv);
750                 break;
751         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
752                 kmem_cache_free(ib_mad_cache, mad_priv);
753                 break;
754         case IB_MAD_RESULT_SUCCESS:
755                 /* Treat like an incoming receive MAD */
756                 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
757                                             mad_agent_priv->agent.port_num);
758                 if (port_priv) {
759                         memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
760                         recv_mad_agent = find_mad_agent(port_priv,
761                                                         &mad_priv->mad.mad);
762                 }
763                 if (!port_priv || !recv_mad_agent) {
764                         /*
765                          * No receiving agent so drop packet and
766                          * generate send completion.
767                          */
768                         kmem_cache_free(ib_mad_cache, mad_priv);
769                         break;
770                 }
771                 local->mad_priv = mad_priv;
772                 local->recv_mad_agent = recv_mad_agent;
773                 break;
774         default:
775                 kmem_cache_free(ib_mad_cache, mad_priv);
776                 kfree(local);
777                 ret = -EINVAL;
778                 goto out;
779         }
780
781         local->mad_send_wr = mad_send_wr;
782         /* Reference MAD agent until send side of local completion handled */
783         atomic_inc(&mad_agent_priv->refcount);
784         /* Queue local completion to local list */
785         spin_lock_irqsave(&mad_agent_priv->lock, flags);
786         list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
787         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
788         queue_work(mad_agent_priv->qp_info->port_priv->wq,
789                    &mad_agent_priv->local_work);
790         ret = 1;
791 out:
792         return ret;
793 }
794
795 static int get_pad_size(int hdr_len, int data_len)
796 {
797         int seg_size, pad;
798
799         seg_size = sizeof(struct ib_mad) - hdr_len;
800         if (data_len && seg_size) {
801                 pad = seg_size - data_len % seg_size;
802                 return pad == seg_size ? 0 : pad;
803         } else
804                 return seg_size;
805 }
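/*
 * Worked example (illustrative): for a 24-byte MAD header,
 * seg_size = sizeof(struct ib_mad) - 24 = 232.  Sending 30 bytes of data
 * gives pad = 232 - (30 % 232) = 202, i.e. data plus padding fills exactly
 * one 232-byte segment.  If data_len is an exact multiple of seg_size the
 * modulo is zero, pad would equal seg_size, and 0 is returned instead.
 */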
806
807 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
808 {
809         struct ib_rmpp_segment *s, *t;
810
811         list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
812                 list_del(&s->list);
813                 kfree(s);
814         }
815 }
816
817 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
818                                 gfp_t gfp_mask)
819 {
820         struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
821         struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
822         struct ib_rmpp_segment *seg = NULL;
823         int left, seg_size, pad;
824
825         send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
826         seg_size = send_buf->seg_size;
827         pad = send_wr->pad;
828
829         /* Allocate data segments. */
830         for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
831                 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
832                 if (!seg) {
833                         printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
834                                "alloc failed for len %zd, gfp %#x\n",
835                                sizeof (*seg) + seg_size, gfp_mask);
836                         free_send_rmpp_list(send_wr);
837                         return -ENOMEM;
838                 }
839                 seg->num = ++send_buf->seg_count;
840                 list_add_tail(&seg->list, &send_wr->rmpp_list);
841         }
842
843         /* Zero any padding */
844         if (pad)
845                 memset(seg->data + seg_size - pad, 0, pad);
846
847         rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
848                                           agent.rmpp_version;
849         rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
850         ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
851
852         send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
853                                         struct ib_rmpp_segment, list);
854         send_wr->last_ack_seg = send_wr->cur_seg;
855         return 0;
856 }
857
858 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
859                                             u32 remote_qpn, u16 pkey_index,
860                                             int rmpp_active,
861                                             int hdr_len, int data_len,
862                                             gfp_t gfp_mask)
863 {
864         struct ib_mad_agent_private *mad_agent_priv;
865         struct ib_mad_send_wr_private *mad_send_wr;
866         int pad, message_size, ret, size;
867         void *buf;
868
869         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
870                                       agent);
871         pad = get_pad_size(hdr_len, data_len);
872         message_size = hdr_len + data_len + pad;
873
874         if ((!mad_agent->rmpp_version &&
875              (rmpp_active || message_size > sizeof(struct ib_mad))) ||
876             (!rmpp_active && message_size > sizeof(struct ib_mad)))
877                 return ERR_PTR(-EINVAL);
878
879         size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
880         buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
881         if (!buf)
882                 return ERR_PTR(-ENOMEM);
883
884         mad_send_wr = buf + size;
885         INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
886         mad_send_wr->send_buf.mad = buf;
887         mad_send_wr->send_buf.hdr_len = hdr_len;
888         mad_send_wr->send_buf.data_len = data_len;
889         mad_send_wr->pad = pad;
890
891         mad_send_wr->mad_agent_priv = mad_agent_priv;
892         mad_send_wr->sg_list[0].length = hdr_len;
893         mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
894         mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
895         mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
896
897         mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
898         mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
899         mad_send_wr->send_wr.num_sge = 2;
900         mad_send_wr->send_wr.opcode = IB_WR_SEND;
901         mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
902         mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
903         mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
904         mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
905
906         if (rmpp_active) {
907                 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
908                 if (ret) {
909                         kfree(buf);
910                         return ERR_PTR(ret);
911                 }
912         }
913
914         mad_send_wr->send_buf.mad_agent = mad_agent;
915         atomic_inc(&mad_agent_priv->refcount);
916         return &mad_send_wr->send_buf;
917 }
918 EXPORT_SYMBOL(ib_create_send_mad);
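/*
 * Illustrative sketch (the names "agent", "ah", "mgmt_class", "remote_qpn"
 * and "pkey_index" are assumed to come from the caller): allocating a
 * single, non-RMPP MAD whose header/data split is taken from the data
 * offset of the class being sent.
 *
 *	int hdr_len = ib_get_mad_data_offset(mgmt_class);
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0, hdr_len,
 *				 sizeof(struct ib_mad) - hdr_len, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *
 * The MAD itself is then filled in through msg->mad (header first, payload
 * after it) before the buffer is posted.
 */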
919
920 int ib_get_mad_data_offset(u8 mgmt_class)
921 {
922         if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
923                 return IB_MGMT_SA_HDR;
924         else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
925                  (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
926                  (mgmt_class == IB_MGMT_CLASS_BIS))
927                 return IB_MGMT_DEVICE_HDR;
928         else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
929                  (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
930                 return IB_MGMT_VENDOR_HDR;
931         else
932                 return IB_MGMT_MAD_HDR;
933 }
934 EXPORT_SYMBOL(ib_get_mad_data_offset);
935
936 int ib_is_mad_class_rmpp(u8 mgmt_class)
937 {
938         if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
939             (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
940             (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
941             (mgmt_class == IB_MGMT_CLASS_BIS) ||
942             ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
943              (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
944                 return 1;
945         return 0;
946 }
947 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
948
949 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
950 {
951         struct ib_mad_send_wr_private *mad_send_wr;
952         struct list_head *list;
953
954         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
955                                    send_buf);
956         list = &mad_send_wr->cur_seg->list;
957
958         if (mad_send_wr->cur_seg->num < seg_num) {
959                 list_for_each_entry(mad_send_wr->cur_seg, list, list)
960                         if (mad_send_wr->cur_seg->num == seg_num)
961                                 break;
962         } else if (mad_send_wr->cur_seg->num > seg_num) {
963                 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
964                         if (mad_send_wr->cur_seg->num == seg_num)
965                                 break;
966         }
967         return mad_send_wr->cur_seg->data;
968 }
969 EXPORT_SYMBOL(ib_get_rmpp_segment);
970
971 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
972 {
973         if (mad_send_wr->send_buf.seg_count)
974                 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
975                                            mad_send_wr->seg_num);
976         else
977                 return mad_send_wr->send_buf.mad +
978                        mad_send_wr->send_buf.hdr_len;
979 }
980
981 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
982 {
983         struct ib_mad_agent_private *mad_agent_priv;
984         struct ib_mad_send_wr_private *mad_send_wr;
985
986         mad_agent_priv = container_of(send_buf->mad_agent,
987                                       struct ib_mad_agent_private, agent);
988         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
989                                    send_buf);
990
991         free_send_rmpp_list(mad_send_wr);
992         kfree(send_buf->mad);
993         deref_mad_agent(mad_agent_priv);
994 }
995 EXPORT_SYMBOL(ib_free_send_mad);
996
997 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
998 {
999         struct ib_mad_qp_info *qp_info;
1000         struct list_head *list;
1001         struct ib_send_wr *bad_send_wr;
1002         struct ib_mad_agent *mad_agent;
1003         struct ib_sge *sge;
1004         unsigned long flags;
1005         int ret;
1006
1007         /* Set WR ID to find mad_send_wr upon completion */
1008         qp_info = mad_send_wr->mad_agent_priv->qp_info;
1009         mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1010         mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1011
1012         mad_agent = mad_send_wr->send_buf.mad_agent;
1013         sge = mad_send_wr->sg_list;
1014         sge[0].addr = ib_dma_map_single(mad_agent->device,
1015                                         mad_send_wr->send_buf.mad,
1016                                         sge[0].length,
1017                                         DMA_TO_DEVICE);
1018         mad_send_wr->header_mapping = sge[0].addr;
1019
1020         sge[1].addr = ib_dma_map_single(mad_agent->device,
1021                                         ib_get_payload(mad_send_wr),
1022                                         sge[1].length,
1023                                         DMA_TO_DEVICE);
1024         mad_send_wr->payload_mapping = sge[1].addr;
1025
1026         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1027         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1028                 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1029                                    &bad_send_wr);
1030                 list = &qp_info->send_queue.list;
1031         } else {
1032                 ret = 0;
1033                 list = &qp_info->overflow_list;
1034         }
1035
1036         if (!ret) {
1037                 qp_info->send_queue.count++;
1038                 list_add_tail(&mad_send_wr->mad_list.list, list);
1039         }
1040         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1041         if (ret) {
1042                 ib_dma_unmap_single(mad_agent->device,
1043                                     mad_send_wr->header_mapping,
1044                                     sge[0].length, DMA_TO_DEVICE);
1045                 ib_dma_unmap_single(mad_agent->device,
1046                                     mad_send_wr->payload_mapping,
1047                                     sge[1].length, DMA_TO_DEVICE);
1048         }
1049         return ret;
1050 }
1051
1052 /*
1053  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1054  *  with the registered client
1055  */
1056 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1057                      struct ib_mad_send_buf **bad_send_buf)
1058 {
1059         struct ib_mad_agent_private *mad_agent_priv;
1060         struct ib_mad_send_buf *next_send_buf;
1061         struct ib_mad_send_wr_private *mad_send_wr;
1062         unsigned long flags;
1063         int ret = -EINVAL;
1064
1065         /* Walk list of send WRs and post each on send list */
1066         for (; send_buf; send_buf = next_send_buf) {
1067
1068                 mad_send_wr = container_of(send_buf,
1069                                            struct ib_mad_send_wr_private,
1070                                            send_buf);
1071                 mad_agent_priv = mad_send_wr->mad_agent_priv;
1072
1073                 if (!send_buf->mad_agent->send_handler ||
1074                     (send_buf->timeout_ms &&
1075                      !send_buf->mad_agent->recv_handler)) {
1076                         ret = -EINVAL;
1077                         goto error;
1078                 }
1079
1080                 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1081                         if (mad_agent_priv->agent.rmpp_version) {
1082                                 ret = -EINVAL;
1083                                 goto error;
1084                         }
1085                 }
1086
1087                 /*
1088                  * Save pointer to next work request to post in case the
1089                  * current one completes, and the user modifies the work
1090                  * request associated with the completion
1091                  */
1092                 next_send_buf = send_buf->next;
1093                 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1094
1095                 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1096                     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1097                         ret = handle_outgoing_dr_smp(mad_agent_priv,
1098                                                      mad_send_wr);
1099                         if (ret < 0)            /* error */
1100                                 goto error;
1101                         else if (ret == 1)      /* locally consumed */
1102                                 continue;
1103                 }
1104
1105                 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1106                 /* Timeout will be updated after send completes */
1107                 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1108                 mad_send_wr->max_retries = send_buf->retries;
1109                 mad_send_wr->retries_left = send_buf->retries;
1110                 send_buf->retries = 0;
1111                 /* Reference for work request to QP + response */
1112                 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1113                 mad_send_wr->status = IB_WC_SUCCESS;
1114
1115                 /* Reference MAD agent until send completes */
1116                 atomic_inc(&mad_agent_priv->refcount);
1117                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1118                 list_add_tail(&mad_send_wr->agent_list,
1119                               &mad_agent_priv->send_list);
1120                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1121
1122                 if (mad_agent_priv->agent.rmpp_version) {
1123                         ret = ib_send_rmpp_mad(mad_send_wr);
1124                         if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1125                                 ret = ib_send_mad(mad_send_wr);
1126                 } else
1127                         ret = ib_send_mad(mad_send_wr);
1128                 if (ret < 0) {
1129                         /* Fail send request */
1130                         spin_lock_irqsave(&mad_agent_priv->lock, flags);
1131                         list_del(&mad_send_wr->agent_list);
1132                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1133                         atomic_dec(&mad_agent_priv->refcount);
1134                         goto error;
1135                 }
1136         }
1137         return 0;
1138 error:
1139         if (bad_send_buf)
1140                 *bad_send_buf = send_buf;
1141         return ret;
1142 }
1143 EXPORT_SYMBOL(ib_post_send_mad);
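/*
 * Illustrative sketch continuing the ib_create_send_mad() example above:
 * post the buffer, and release it from the (assumed) send handler once the
 * completion is reported.
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret) {
 *		ib_free_send_mad(msg);
 *		return ret;
 *	}
 *
 *	static void my_send_done(struct ib_mad_agent *agent,
 *				 struct ib_mad_send_wc *send_wc)
 *	{
 *		ib_free_send_mad(send_wc->send_buf);
 *	}
 */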
1144
1145 /*
1146  * ib_free_recv_mad - Returns data buffers used to receive
1147  *  a MAD to the access layer
1148  */
1149 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1150 {
1151         struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1152         struct ib_mad_private_header *mad_priv_hdr;
1153         struct ib_mad_private *priv;
1154         struct list_head free_list;
1155
1156         INIT_LIST_HEAD(&free_list);
1157         list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1158
1159         list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1160                                         &free_list, list) {
1161                 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1162                                            recv_buf);
1163                 mad_priv_hdr = container_of(mad_recv_wc,
1164                                             struct ib_mad_private_header,
1165                                             recv_wc);
1166                 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1167                                     header);
1168                 kmem_cache_free(ib_mad_cache, priv);
1169         }
1170 }
1171 EXPORT_SYMBOL(ib_free_recv_mad);
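/*
 * Illustrative sketch (assumed receive handler): the client owns the
 * ib_mad_recv_wc passed to its recv_handler and returns the buffers by
 * calling ib_free_recv_mad() once it is done with recv_wc->recv_buf.mad.
 *
 *	static void my_recv_done(struct ib_mad_agent *agent,
 *				 struct ib_mad_recv_wc *recv_wc)
 *	{
 *		struct ib_mad *mad = recv_wc->recv_buf.mad;
 *
 *		... parse or copy *mad ...
 *		ib_free_recv_mad(recv_wc);
 *	}
 */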
1172
1173 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1174                                         u8 rmpp_version,
1175                                         ib_mad_send_handler send_handler,
1176                                         ib_mad_recv_handler recv_handler,
1177                                         void *context)
1178 {
1179         return ERR_PTR(-EINVAL);        /* XXX: for now */
1180 }
1181 EXPORT_SYMBOL(ib_redirect_mad_qp);
1182
1183 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1184                       struct ib_wc *wc)
1185 {
1186         printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
1187         return 0;
1188 }
1189 EXPORT_SYMBOL(ib_process_mad_wc);
1190
1191 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1192                          struct ib_mad_reg_req *mad_reg_req)
1193 {
1194         int i;
1195
1196         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1197                 if ((*method)->agent[i]) {
1198                         printk(KERN_ERR PFX "Method %d already in use\n", i);
1199                         return -EINVAL;
1200                 }
1201         }
1202         return 0;
1203 }
1204
1205 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1206 {
1207         /* Allocate management method table */
1208         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1209         if (!*method) {
1210                 printk(KERN_ERR PFX "No memory for "
1211                        "ib_mad_mgmt_method_table\n");
1212                 return -ENOMEM;
1213         }
1214
1215         return 0;
1216 }
1217
1218 /*
1219  * Check to see if there are any methods still in use
1220  */
1221 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1222 {
1223         int i;
1224
1225         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1226                 if (method->agent[i])
1227                         return 1;
1228         return 0;
1229 }
1230
1231 /*
1232  * Check to see if there are any method tables for this class still in use
1233  */
1234 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1235 {
1236         int i;
1237
1238         for (i = 0; i < MAX_MGMT_CLASS; i++)
1239                 if (class->method_table[i])
1240                         return 1;
1241         return 0;
1242 }
1243
1244 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1245 {
1246         int i;
1247
1248         for (i = 0; i < MAX_MGMT_OUI; i++)
1249                 if (vendor_class->method_table[i])
1250                         return 1;
1251         return 0;
1252 }
1253
1254 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1255                            char *oui)
1256 {
1257         int i;
1258
1259         for (i = 0; i < MAX_MGMT_OUI; i++)
1260                 /* Is there a matching OUI for this vendor class? */
1261                 if (!memcmp(vendor_class->oui[i], oui, 3))
1262                         return i;
1263
1264         return -1;
1265 }
1266
1267 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1268 {
1269         int i;
1270
1271         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1272                 if (vendor->vendor_class[i])
1273                         return 1;
1274
1275         return 0;
1276 }
1277
1278 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1279                                      struct ib_mad_agent_private *agent)
1280 {
1281         int i;
1282
1283         /* Remove any methods for this mad agent */
1284         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1285                 if (method->agent[i] == agent) {
1286                         method->agent[i] = NULL;
1287                 }
1288         }
1289 }
1290
1291 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1292                               struct ib_mad_agent_private *agent_priv,
1293                               u8 mgmt_class)
1294 {
1295         struct ib_mad_port_private *port_priv;
1296         struct ib_mad_mgmt_class_table **class;
1297         struct ib_mad_mgmt_method_table **method;
1298         int i, ret;
1299
1300         port_priv = agent_priv->qp_info->port_priv;
1301         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1302         if (!*class) {
1303                 /* Allocate management class table for "new" class version */
1304                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1305                 if (!*class) {
1306                         printk(KERN_ERR PFX "No memory for "
1307                                "ib_mad_mgmt_class_table\n");
1308                         ret = -ENOMEM;
1309                         goto error1;
1310                 }
1311
1312                 /* Allocate method table for this management class */
1313                 method = &(*class)->method_table[mgmt_class];
1314                 if ((ret = allocate_method_table(method)))
1315                         goto error2;
1316         } else {
1317                 method = &(*class)->method_table[mgmt_class];
1318                 if (!*method) {
1319                         /* Allocate method table for this management class */
1320                         if ((ret = allocate_method_table(method)))
1321                                 goto error1;
1322                 }
1323         }
1324
1325         /* Now, make sure methods are not already in use */
1326         if (method_in_use(method, mad_reg_req))
1327                 goto error3;
1328
1329         /* Finally, add in methods being registered */
1330         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1331                 (*method)->agent[i] = agent_priv;
1332
1333         return 0;
1334
1335 error3:
1336         /* Remove any methods for this mad agent */
1337         remove_methods_mad_agent(*method, agent_priv);
1338         /* Now, check to see if there are any methods in use */
1339         if (!check_method_table(*method)) {
1340                 /* If not, release management method table */
1341                 kfree(*method);
1342                 *method = NULL;
1343         }
1344         ret = -EINVAL;
1345         goto error1;
1346 error2:
1347         kfree(*class);
1348         *class = NULL;
1349 error1:
1350         return ret;
1351 }
1352
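     /*
      * Register an agent's methods for a vendor class identified by OUI,
      * allocating the vendor class table and per-OUI method table as needed.
      */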
1353 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1354                            struct ib_mad_agent_private *agent_priv)
1355 {
1356         struct ib_mad_port_private *port_priv;
1357         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1358         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1359         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1360         struct ib_mad_mgmt_method_table **method;
1361         int i, ret = -ENOMEM;
1362         u8 vclass;
1363
1364         /* "New" vendor (with OUI) class */
1365         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1366         port_priv = agent_priv->qp_info->port_priv;
1367         vendor_table = &port_priv->version[
1368                                 mad_reg_req->mgmt_class_version].vendor;
1369         if (!*vendor_table) {
1370                 /* Allocate mgmt vendor class table for "new" class version */
1371                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1372                 if (!vendor) {
1373                         printk(KERN_ERR PFX "No memory for "
1374                                "ib_mad_mgmt_vendor_class_table\n");
1375                         goto error1;
1376                 }
1377
1378                 *vendor_table = vendor;
1379         }
1380         if (!(*vendor_table)->vendor_class[vclass]) {
1381                 /* Allocate table for this management vendor class */
1382                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1383                 if (!vendor_class) {
1384                         printk(KERN_ERR PFX "No memory for "
1385                                "ib_mad_mgmt_vendor_class\n");
1386                         goto error2;
1387                 }
1388
1389                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1390         }
1391         for (i = 0; i < MAX_MGMT_OUI; i++) {
1392                 /* Is there a matching OUI for this vendor class? */
1393                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1394                             mad_reg_req->oui, 3)) {
1395                         method = &(*vendor_table)->vendor_class[
1396                                                 vclass]->method_table[i];
1397                         BUG_ON(!*method);
1398                         goto check_in_use;
1399                 }
1400         }
1401         for (i = 0; i < MAX_MGMT_OUI; i++) {
1402                 /* Is this OUI slot available? */
1403                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1404                                 vclass]->oui[i])) {
1405                         method = &(*vendor_table)->vendor_class[
1406                                 vclass]->method_table[i];
1407                         BUG_ON(*method);
1408                         /* Allocate method table for this OUI */
1409                         if ((ret = allocate_method_table(method)))
1410                                 goto error3;
1411                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1412                                mad_reg_req->oui, 3);
1413                         goto check_in_use;
1414                 }
1415         }
1416         printk(KERN_ERR PFX "All OUI slots in use\n");
1417         goto error3;
1418
1419 check_in_use:
1420         /* Now, make sure methods are not already in use */
1421         if (method_in_use(method, mad_reg_req))
1422                 goto error4;
1423
1424         /* Finally, add in methods being registered */
1425         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1426                 (*method)->agent[i] = agent_priv;
1427
1428         return 0;
1429
1430 error4:
1431         /* Remove any methods for this mad agent */
1432         remove_methods_mad_agent(*method, agent_priv);
1433         /* Now, check to see if there are any methods in use */
1434         if (!check_method_table(*method)) {
1435                 /* If not, release management method table */
1436                 kfree(*method);
1437                 *method = NULL;
1438         }
1439         ret = -EINVAL;
1440 error3:
1441         if (vendor_class) {
1442                 (*vendor_table)->vendor_class[vclass] = NULL;
1443                 kfree(vendor_class);
1444         }
1445 error2:
1446         if (vendor) {
1447                 *vendor_table = NULL;
1448                 kfree(vendor);
1449         }
1450 error1:
1451         return ret;
1452 }
1453
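     /*
      * Tear down an agent's MAD registration, freeing any class, vendor
      * class, or method tables that become empty as a result.
      */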
1454 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1455 {
1456         struct ib_mad_port_private *port_priv;
1457         struct ib_mad_mgmt_class_table *class;
1458         struct ib_mad_mgmt_method_table *method;
1459         struct ib_mad_mgmt_vendor_class_table *vendor;
1460         struct ib_mad_mgmt_vendor_class *vendor_class;
1461         int index;
1462         u8 mgmt_class;
1463
1464         /*
1465          * Was a MAD registration request supplied
1466          * with the original registration?
1467          */
1468         if (!agent_priv->reg_req) {
1469                 goto out;
1470         }
1471
1472         port_priv = agent_priv->qp_info->port_priv;
1473         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1474         class = port_priv->version[
1475                         agent_priv->reg_req->mgmt_class_version].class;
1476         if (!class)
1477                 goto vendor_check;
1478
1479         method = class->method_table[mgmt_class];
1480         if (method) {
1481                 /* Remove any methods for this mad agent */
1482                 remove_methods_mad_agent(method, agent_priv);
1483                 /* Now, check to see if there are any methods still in use */
1484                 if (!check_method_table(method)) {
1485                         /* If not, release management method table */
1486                         kfree(method);
1487                         class->method_table[mgmt_class] = NULL;
1488                         /* Any management classes left? */
1489                         if (!check_class_table(class)) {
1490                                 /* If not, release management class table */
1491                                 kfree(class);
1492                                 port_priv->version[
1493                                         agent_priv->reg_req->
1494                                         mgmt_class_version].class = NULL;
1495                         }
1496                 }
1497         }
1498
1499 vendor_check:
1500         if (!is_vendor_class(mgmt_class))
1501                 goto out;
1502
1503         /* normalize mgmt_class to vendor range 2 */
1504         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1505         vendor = port_priv->version[
1506                         agent_priv->reg_req->mgmt_class_version].vendor;
1507
1508         if (!vendor)
1509                 goto out;
1510
1511         vendor_class = vendor->vendor_class[mgmt_class];
1512         if (vendor_class) {
1513                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1514                 if (index < 0)
1515                         goto out;
1516                 method = vendor_class->method_table[index];
1517                 if (method) {
1518                         /* Remove any methods for this mad agent */
1519                         remove_methods_mad_agent(method, agent_priv);
1520                         /*
1521                          * Now, check to see if there are
1522                          * any methods still in use
1523                          */
1524                         if (!check_method_table(method)) {
1525                                 /* If not, release management method table */
1526                                 kfree(method);
1527                                 vendor_class->method_table[index] = NULL;
1528                                 memset(vendor_class->oui[index], 0, 3);
1529                                 /* Any OUIs left? */
1530                                 if (!check_vendor_class(vendor_class)) {
1531                                         /* If not, release vendor class table */
1532                                         kfree(vendor_class);
1533                                         vendor->vendor_class[mgmt_class] = NULL;
1534                                         /* Any other vendor classes left? */
1535                                         if (!check_vendor_table(vendor)) {
1536                                                 kfree(vendor);
1537                                                 port_priv->version[
1538                                                         agent_priv->reg_req->
1539                                                         mgmt_class_version].
1540                                                         vendor = NULL;
1541                                         }
1542                                 }
1543                         }
1544                 }
1545         }
1546
1547 out:
1548         return;
1549 }
1550
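     /*
      * Find the agent that should receive a MAD: responses are routed on the
      * high 32 bits of the TID, requests on version/class/method (plus OUI
      * for vendor classes).
      */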
1551 static struct ib_mad_agent_private *
1552 find_mad_agent(struct ib_mad_port_private *port_priv,
1553                struct ib_mad *mad)
1554 {
1555         struct ib_mad_agent_private *mad_agent = NULL;
1556         unsigned long flags;
1557
1558         spin_lock_irqsave(&port_priv->reg_lock, flags);
1559         if (ib_response_mad(mad)) {
1560                 u32 hi_tid;
1561                 struct ib_mad_agent_private *entry;
1562
1563                 /*
1564                  * Routing is based on high 32 bits of transaction ID
1565                  * of MAD.
1566                  */
1567                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
1568                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1569                         if (entry->agent.hi_tid == hi_tid) {
1570                                 mad_agent = entry;
1571                                 break;
1572                         }
1573                 }
1574         } else {
1575                 struct ib_mad_mgmt_class_table *class;
1576                 struct ib_mad_mgmt_method_table *method;
1577                 struct ib_mad_mgmt_vendor_class_table *vendor;
1578                 struct ib_mad_mgmt_vendor_class *vendor_class;
1579                 struct ib_vendor_mad *vendor_mad;
1580                 int index;
1581
1582                 /*
1583                  * Routing is based on version, class, and method
1584                  * For "newer" vendor MADs, also based on OUI
1585                  */
1586                 if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
1587                         goto out;
1588                 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1589                         class = port_priv->version[
1590                                         mad->mad_hdr.class_version].class;
1591                         if (!class)
1592                                 goto out;
1593                         method = class->method_table[convert_mgmt_class(
1594                                                         mad->mad_hdr.mgmt_class)];
1595                         if (method)
1596                                 mad_agent = method->agent[mad->mad_hdr.method &
1597                                                           ~IB_MGMT_METHOD_RESP];
1598                 } else {
1599                         vendor = port_priv->version[
1600                                         mad->mad_hdr.class_version].vendor;
1601                         if (!vendor)
1602                                 goto out;
1603                         vendor_class = vendor->vendor_class[vendor_class_index(
1604                                                 mad->mad_hdr.mgmt_class)];
1605                         if (!vendor_class)
1606                                 goto out;
1607                         /* Find matching OUI */
1608                         vendor_mad = (struct ib_vendor_mad *)mad;
1609                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1610                         if (index == -1)
1611                                 goto out;
1612                         method = vendor_class->method_table[index];
1613                         if (method) {
1614                                 mad_agent = method->agent[mad->mad_hdr.method &
1615                                                           ~IB_MGMT_METHOD_RESP];
1616                         }
1617                 }
1618         }
1619
1620         if (mad_agent) {
1621                 if (mad_agent->agent.recv_handler)
1622                         atomic_inc(&mad_agent->refcount);
1623                 else {
1624                         printk(KERN_NOTICE PFX "No receive handler for client "
1625                                "%p on port %d\n",
1626                                &mad_agent->agent, port_priv->port_num);
1627                         mad_agent = NULL;
1628                 }
1629         }
1630 out:
1631         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1632
1633         return mad_agent;
1634 }
1635
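     /*
      * Validate the MAD base version and that the management class matches
      * the QP it arrived on (SMI classes only on QP0, all others off QP0).
      */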
1636 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1637 {
1638         int valid = 0;
1639
1640         /* Make sure MAD base version is understood */
1641         if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
1642                 printk(KERN_ERR PFX "MAD received with unsupported base "
1643                        "version %d\n", mad->mad_hdr.base_version);
1644                 goto out;
1645         }
1646
1647         /* Filter SMI packets sent to other than QP0 */
1648         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1649             (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1650                 if (qp_num == 0)
1651                         valid = 1;
1652         } else {
1653                 /* Filter GSI packets sent to QP0 */
1654                 if (qp_num != 0)
1655                         valid = 1;
1656         }
1657
1658 out:
1659         return valid;
1660 }
1661
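     /*
      * A MAD counts as a data MAD unless the agent uses RMPP and this is an
      * active RMPP segment of a non-DATA type (ACK, STOP, ABORT).
      */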
1662 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1663                        struct ib_mad_hdr *mad_hdr)
1664 {
1665         struct ib_rmpp_mad *rmpp_mad;
1666
1667         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1668         return !mad_agent_priv->agent.rmpp_version ||
1669                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1670                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1671                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1672 }
1673
1674 static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1675                                      struct ib_mad_recv_wc *rwc)
1676 {
1677         return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1678                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1679 }
1680
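     /*
      * Check whether a sent MAD and a received MAD refer to the same remote
      * address, comparing LID/path bits or GIDs depending on GRH usage.
      */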
1681 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1682                                    struct ib_mad_send_wr_private *wr,
1683                                    struct ib_mad_recv_wc *rwc)
1684 {
1685         struct ib_ah_attr attr;
1686         u8 send_resp, rcv_resp;
1687         union ib_gid sgid;
1688         struct ib_device *device = mad_agent_priv->agent.device;
1689         u8 port_num = mad_agent_priv->agent.port_num;
1690         u8 lmc;
1691
1692         send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1693         rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1694
1695         if (send_resp == rcv_resp)
1696                 /* Both requests or both responses; treat GIDs as different */
1697                 return 0;
1698
1699         if (ib_query_ah(wr->send_buf.ah, &attr))
1700                 /* Assume not equal, to avoid false positives. */
1701                 return 0;
1702
1703         if (!!(attr.ah_flags & IB_AH_GRH) !=
1704             !!(rwc->wc->wc_flags & IB_WC_GRH))
1705                 /* One has a GRH/GID, the other does not; assume different */
1706                 return 0;
1707
1708         if (!send_resp && rcv_resp) {
1709                 /* This is a request/response pair. */
1710                 if (!(attr.ah_flags & IB_AH_GRH)) {
1711                         if (ib_get_cached_lmc(device, port_num, &lmc))
1712                                 return 0;
1713                         return (!lmc || !((attr.src_path_bits ^
1714                                            rwc->wc->dlid_path_bits) &
1715                                           ((1 << lmc) - 1)));
1716                 } else {
1717                         if (ib_get_cached_gid(device, port_num,
1718                                               attr.grh.sgid_index, &sgid))
1719                                 return 0;
1720                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1721                                        16);
1722                 }
1723         }
1724
1725         if (!(attr.ah_flags & IB_AH_GRH))
1726                 return attr.dlid == rwc->wc->slid;
1727         else
1728                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1729                                16);
1730 }
1731
1732 static inline int is_direct(u8 class)
1733 {
1734         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1735 }
1736
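     /*
      * Find the send WR a received response corresponds to, matching on TID,
      * management class and, except for directed-route MADs, the remote
      * GID/path bits.
      */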
1737 struct ib_mad_send_wr_private*
1738 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1739                  struct ib_mad_recv_wc *wc)
1740 {
1741         struct ib_mad_send_wr_private *wr;
1742         struct ib_mad *mad;
1743
1744         mad = (struct ib_mad *)wc->recv_buf.mad;
1745
1746         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1747                 if ((wr->tid == mad->mad_hdr.tid) &&
1748                     rcv_has_same_class(wr, wc) &&
1749                     /*
1750                      * Don't check GID for direct routed MADs.
1751                      * These might have permissive LIDs.
1752                      */
1753                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1754                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1755                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1756         }
1757
1758         /*
1759          * It's possible to receive the response before we've
1760          * been notified that the send has completed
1761          */
1762         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1763                 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1764                     wr->tid == mad->mad_hdr.tid &&
1765                     wr->timeout &&
1766                     rcv_has_same_class(wr, wc) &&
1767                     /*
1768                      * Don't check GID for direct routed MADs.
1769                      * These might have permissive LIDs.
1770                      */
1771                     (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1772                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1773                         /* Verify request has not been canceled */
1774                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1775         }
1776         return NULL;
1777 }
1778
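     /*
      * Stop the response timer for a completed request; once only the send
      * completion reference remains, move it to the done list.
      */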
1779 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1780 {
1781         mad_send_wr->timeout = 0;
1782         if (mad_send_wr->refcount == 1)
1783                 list_move_tail(&mad_send_wr->agent_list,
1784                               &mad_send_wr->mad_agent_priv->done_list);
1785 }
1786
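     /*
      * Deliver a received MAD to its agent; responses also complete the
      * matching send request.  RMPP reassembly, if enabled, happens first.
      */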
1787 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1788                                  struct ib_mad_recv_wc *mad_recv_wc)
1789 {
1790         struct ib_mad_send_wr_private *mad_send_wr;
1791         struct ib_mad_send_wc mad_send_wc;
1792         unsigned long flags;
1793
1794         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1795         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1796         if (mad_agent_priv->agent.rmpp_version) {
1797                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1798                                                       mad_recv_wc);
1799                 if (!mad_recv_wc) {
1800                         deref_mad_agent(mad_agent_priv);
1801                         return;
1802                 }
1803         }
1804
1805         /* Complete corresponding request */
1806         if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
1807                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1808                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1809                 if (!mad_send_wr) {
1810                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1811                         ib_free_recv_mad(mad_recv_wc);
1812                         deref_mad_agent(mad_agent_priv);
1813                         return;
1814                 }
1815                 ib_mark_mad_done(mad_send_wr);
1816                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1817
1818                 /* Defined behavior is to complete response before request */
1819                 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
1820                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1821                                                    mad_recv_wc);
1822                 atomic_dec(&mad_agent_priv->refcount);
1823
1824                 mad_send_wc.status = IB_WC_SUCCESS;
1825                 mad_send_wc.vendor_err = 0;
1826                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1827                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1828         } else {
1829                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1830                                                    mad_recv_wc);
1831                 deref_mad_agent(mad_agent_priv);
1832         }
1833 }
1834
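     /*
      * Receive completion handler: unmap the buffer, validate the MAD, run
      * SMI handling for directed-route SMPs, offer the MAD to the driver's
      * process_mad, then dispatch it to the matching agent and repost a
      * receive.
      */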
1835 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1836                                      struct ib_wc *wc)
1837 {
1838         struct ib_mad_qp_info *qp_info;
1839         struct ib_mad_private_header *mad_priv_hdr;
1840         struct ib_mad_private *recv, *response = NULL;
1841         struct ib_mad_list_head *mad_list;
1842         struct ib_mad_agent_private *mad_agent;
1843         int port_num;
1844
1845         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
1846         qp_info = mad_list->mad_queue->qp_info;
1847         dequeue_mad(mad_list);
1848
1849         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
1850                                     mad_list);
1851         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
1852         ib_dma_unmap_single(port_priv->device,
1853                             recv->header.mapping,
1854                             sizeof(struct ib_mad_private) -
1855                               sizeof(struct ib_mad_private_header),
1856                             DMA_FROM_DEVICE);
1857
1858         /* Setup MAD receive work completion from "normal" work completion */
1859         recv->header.wc = *wc;
1860         recv->header.recv_wc.wc = &recv->header.wc;
1861         recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
1862         recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1863         recv->header.recv_wc.recv_buf.grh = &recv->grh;
1864
1865         if (atomic_read(&qp_info->snoop_count))
1866                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
1867
1868         /* Validate MAD */
1869         if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1870                 goto out;
1871
1872         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
1873         if (!response) {
1874                 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
1875                        "for response buffer\n");
1876                 goto out;
1877         }
1878
1879         if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1880                 port_num = wc->port_num;
1881         else
1882                 port_num = port_priv->port_num;
1883
1884         if (recv->mad.mad.mad_hdr.mgmt_class ==
1885             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1886                 enum smi_forward_action retsmi;
1887
1888                 if (smi_handle_dr_smp_recv(&recv->mad.smp,
1889                                            port_priv->device->node_type,
1890                                            port_num,
1891                                            port_priv->device->phys_port_cnt) ==
1892                                            IB_SMI_DISCARD)
1893                         goto out;
1894
1895                 retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
1896                 if (retsmi == IB_SMI_LOCAL)
1897                         goto local;
1898
1899                 if (retsmi == IB_SMI_SEND) { /* don't forward */
1900                         if (smi_handle_dr_smp_send(&recv->mad.smp,
1901                                                    port_priv->device->node_type,
1902                                                    port_num) == IB_SMI_DISCARD)
1903                                 goto out;
1904
1905                         if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1906                                 goto out;
1907                 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1908                         /* forward case for switches */
1909                         memcpy(response, recv, sizeof(*response));
1910                         response->header.recv_wc.wc = &response->header.wc;
1911                         response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1912                         response->header.recv_wc.recv_buf.grh = &response->grh;
1913
1914                         agent_send_response(&response->mad.mad,
1915                                             &response->grh, wc,
1916                                             port_priv->device,
1917                                             smi_get_fwd_port(&recv->mad.smp),
1918                                             qp_info->qp->qp_num);
1919
1920                         goto out;
1921                 }
1922         }
1923
1924 local:
1925         /* Give driver "right of first refusal" on incoming MAD */
1926         if (port_priv->device->process_mad) {
1927                 int ret;
1928
1929                 ret = port_priv->device->process_mad(port_priv->device, 0,
1930                                                      port_priv->port_num,
1931                                                      wc, &recv->grh,
1932                                                      &recv->mad.mad,
1933                                                      &response->mad.mad);
1934                 if (ret & IB_MAD_RESULT_SUCCESS) {
1935                         if (ret & IB_MAD_RESULT_CONSUMED)
1936                                 goto out;
1937                         if (ret & IB_MAD_RESULT_REPLY) {
1938                                 agent_send_response(&response->mad.mad,
1939                                                     &recv->grh, wc,
1940                                                     port_priv->device,
1941                                                     port_num,
1942                                                     qp_info->qp->qp_num);
1943                                 goto out;
1944                         }
1945                 }
1946         }
1947
1948         mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1949         if (mad_agent) {
1950                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1951                 /*
1952                  * recv is freed by ib_mad_complete_recv() in error cases,
1953                  * or handed to the client via its recv_handler
1954                  */
1955                 recv = NULL;
1956         }
1957
1958 out:
1959         /* Post another receive request for this QP */
1960         if (response) {
1961                 ib_mad_post_receive_mads(qp_info, response);
1962                 if (recv)
1963                         kmem_cache_free(ib_mad_cache, recv);
1964         } else
1965                 ib_mad_post_receive_mads(qp_info, recv);
1966 }
1967
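     /*
      * Cancel the timeout work when the wait list is empty, or pull it in
      * when the first waiting request expires sooner than currently scheduled.
      */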
1968 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1969 {
1970         struct ib_mad_send_wr_private *mad_send_wr;
1971         unsigned long delay;
1972
1973         if (list_empty(&mad_agent_priv->wait_list)) {
1974                 __cancel_delayed_work(&mad_agent_priv->timed_work);
1975         } else {
1976                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1977                                          struct ib_mad_send_wr_private,
1978                                          agent_list);
1979
1980                 if (time_after(mad_agent_priv->timeout,
1981                                mad_send_wr->timeout)) {
1982                         mad_agent_priv->timeout = mad_send_wr->timeout;
1983                         __cancel_delayed_work(&mad_agent_priv->timed_work);
1984                         delay = mad_send_wr->timeout - jiffies;
1985                         if ((long)delay <= 0)
1986                                 delay = 1;
1987                         queue_delayed_work(mad_agent_priv->qp_info->
1988                                            port_priv->wq,
1989                                            &mad_agent_priv->timed_work, delay);
1990                 }
1991         }
1992 }
1993
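     /*
      * Convert a send's timeout to an absolute time and insert it into the
      * wait list sorted by expiry, rescheduling the timeout work if this
      * request now expires first.
      */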
1994 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1995 {
1996         struct ib_mad_agent_private *mad_agent_priv;
1997         struct ib_mad_send_wr_private *temp_mad_send_wr;
1998         struct list_head *list_item;
1999         unsigned long delay;
2000
2001         mad_agent_priv = mad_send_wr->mad_agent_priv;
2002         list_del(&mad_send_wr->agent_list);
2003
2004         delay = mad_send_wr->timeout;
2005         mad_send_wr->timeout += jiffies;
2006
2007         if (delay) {
2008                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2009                         temp_mad_send_wr = list_entry(list_item,
2010                                                 struct ib_mad_send_wr_private,
2011                                                 agent_list);
2012                         if (time_after(mad_send_wr->timeout,
2013                                        temp_mad_send_wr->timeout))
2014                                 break;
2015                 }
2016         }
2017         else
2018                 list_item = &mad_agent_priv->wait_list;
2019         list_add(&mad_send_wr->agent_list, list_item);
2020
2021         /* Reschedule a work item if we have a shorter timeout */
2022         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2023                 __cancel_delayed_work(&mad_agent_priv->timed_work);
2024                 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2025                                    &mad_agent_priv->timed_work, delay);
2026         }
2027 }
2028
2029 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2030                           int timeout_ms)
2031 {
2032         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2033         wait_for_response(mad_send_wr);
2034 }
2035
2036 /*
2037  * Process a send work completion
2038  */
2039 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2040                              struct ib_mad_send_wc *mad_send_wc)
2041 {
2042         struct ib_mad_agent_private     *mad_agent_priv;
2043         unsigned long                   flags;
2044         int                             ret;
2045
2046         mad_agent_priv = mad_send_wr->mad_agent_priv;
2047         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2048         if (mad_agent_priv->agent.rmpp_version) {
2049                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2050                 if (ret == IB_RMPP_RESULT_CONSUMED)
2051                         goto done;
2052         } else
2053                 ret = IB_RMPP_RESULT_UNHANDLED;
2054
2055         if (mad_send_wc->status != IB_WC_SUCCESS &&
2056             mad_send_wr->status == IB_WC_SUCCESS) {
2057                 mad_send_wr->status = mad_send_wc->status;
2058                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2059         }
2060
2061         if (--mad_send_wr->refcount > 0) {
2062                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2063                     mad_send_wr->status == IB_WC_SUCCESS) {
2064                         wait_for_response(mad_send_wr);
2065                 }
2066                 goto done;
2067         }
2068
2069         /* Remove send from MAD agent and notify client of completion */
2070         list_del(&mad_send_wr->agent_list);
2071         adjust_timeout(mad_agent_priv);
2072         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2073
2074         if (mad_send_wr->status != IB_WC_SUCCESS)
2075                 mad_send_wc->status = mad_send_wr->status;
2076         if (ret == IB_RMPP_RESULT_INTERNAL)
2077                 ib_rmpp_send_handler(mad_send_wc);
2078         else
2079                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2080                                                    mad_send_wc);
2081
2082         /* Release reference on agent taken when sending */
2083         deref_mad_agent(mad_agent_priv);
2084         return;
2085 done:
2086         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2087 }
2088
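     /*
      * Send completion handler: unmap the send buffers, complete the work
      * request, and post the next send waiting on the overflow list, if any.
      */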
2089 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2090                                      struct ib_wc *wc)
2091 {
2092         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2093         struct ib_mad_list_head         *mad_list;
2094         struct ib_mad_qp_info           *qp_info;
2095         struct ib_mad_queue             *send_queue;
2096         struct ib_send_wr               *bad_send_wr;
2097         struct ib_mad_send_wc           mad_send_wc;
2098         unsigned long flags;
2099         int ret;
2100
2101         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2102         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2103                                    mad_list);
2104         send_queue = mad_list->mad_queue;
2105         qp_info = send_queue->qp_info;
2106
2107 retry:
2108         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2109                             mad_send_wr->header_mapping,
2110                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2111         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2112                             mad_send_wr->payload_mapping,
2113                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2114         queued_send_wr = NULL;
2115         spin_lock_irqsave(&send_queue->lock, flags);
2116         list_del(&mad_list->list);
2117
2118         /* Move queued send to the send queue */
2119         if (send_queue->count-- > send_queue->max_active) {
2120                 mad_list = container_of(qp_info->overflow_list.next,
2121                                         struct ib_mad_list_head, list);
2122                 queued_send_wr = container_of(mad_list,
2123                                         struct ib_mad_send_wr_private,
2124                                         mad_list);
2125                 list_move_tail(&mad_list->list, &send_queue->list);
2126         }
2127         spin_unlock_irqrestore(&send_queue->lock, flags);
2128
2129         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2130         mad_send_wc.status = wc->status;
2131         mad_send_wc.vendor_err = wc->vendor_err;
2132         if (atomic_read(&qp_info->snoop_count))
2133                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2134                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2135         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2136
2137         if (queued_send_wr) {
2138                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2139                                    &bad_send_wr);
2140                 if (ret) {
2141                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
2142                         mad_send_wr = queued_send_wr;
2143                         wc->status = IB_WC_LOC_QP_OP_ERR;
2144                         goto retry;
2145                 }
2146         }
2147 }
2148
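     /*
      * Mark every send still on the send queue so that its flushed
      * completion is reposted once the QP has recovered.
      */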
2149 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2150 {
2151         struct ib_mad_send_wr_private *mad_send_wr;
2152         struct ib_mad_list_head *mad_list;
2153         unsigned long flags;
2154
2155         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2156         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2157                 mad_send_wr = container_of(mad_list,
2158                                            struct ib_mad_send_wr_private,
2159                                            mad_list);
2160                 mad_send_wr->retry = 1;
2161         }
2162         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2163 }
2164
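     /*
      * Completion-with-error handling: receive errors are left to QP
      * teardown; flushed sends may be reposted, and other send errors move
      * the QP from SQE back to RTS before retrying.
      */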
2165 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2166                               struct ib_wc *wc)
2167 {
2168         struct ib_mad_list_head *mad_list;
2169         struct ib_mad_qp_info *qp_info;
2170         struct ib_mad_send_wr_private *mad_send_wr;
2171         int ret;
2172
2173         /* Determine if failure was a send or receive */
2174         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2175         qp_info = mad_list->mad_queue->qp_info;
2176         if (mad_list->mad_queue == &qp_info->recv_queue)
2177                 /*
2178                  * Receive errors indicate that the QP has entered the error
2179                  * state - error handling/shutdown code will cleanup
2180                  */
2181                 return;
2182
2183         /*
2184          * Send errors will transition the QP to SQE - move
2185          * QP to RTS and repost flushed work requests
2186          */
2187         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2188                                    mad_list);
2189         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2190                 if (mad_send_wr->retry) {
2191                         /* Repost send */
2192                         struct ib_send_wr *bad_send_wr;
2193
2194                         mad_send_wr->retry = 0;
2195                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2196                                         &bad_send_wr);
2197                         if (ret)
2198                                 ib_mad_send_done_handler(port_priv, wc);
2199                 } else
2200                         ib_mad_send_done_handler(port_priv, wc);
2201         } else {
2202                 struct ib_qp_attr *attr;
2203
2204                 /* Transition QP to RTS and fail offending send */
2205                 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2206                 if (attr) {
2207                         attr->qp_state = IB_QPS_RTS;
2208                         attr->cur_qp_state = IB_QPS_SQE;
2209                         ret = ib_modify_qp(qp_info->qp, attr,
2210                                            IB_QP_STATE | IB_QP_CUR_STATE);
2211                         kfree(attr);
2212                         if (ret)
2213                                 printk(KERN_ERR PFX "mad_error_handler - "
2214                                        "ib_modify_qp to RTS : %d\n", ret);
2215                         else
2216                                 mark_sends_for_retry(qp_info);
2217                 }
2218                 ib_mad_send_done_handler(port_priv, wc);
2219         }
2220 }
2221
2222 /*
2223  * IB MAD completion callback
2224  */
2225 static void ib_mad_completion_handler(struct work_struct *work)
2226 {
2227         struct ib_mad_port_private *port_priv;
2228         struct ib_wc wc;
2229
2230         port_priv = container_of(work, struct ib_mad_port_private, work);
2231         ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2232
2233         while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2234                 if (wc.status == IB_WC_SUCCESS) {
2235                         switch (wc.opcode) {
2236                         case IB_WC_SEND:
2237                                 ib_mad_send_done_handler(port_priv, &wc);
2238                                 break;
2239                         case IB_WC_RECV:
2240                                 ib_mad_recv_done_handler(port_priv, &wc);
2241                                 break;
2242                         default:
2243                                 BUG_ON(1);
2244                                 break;
2245                         }
2246                 } else
2247                         mad_error_handler(port_priv, &wc);
2248         }
2249 }
2250
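     /*
      * Cancel every outstanding send for an agent and report each one to
      * the client as flushed.
      */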
2251 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2252 {
2253         unsigned long flags;
2254         struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2255         struct ib_mad_send_wc mad_send_wc;
2256         struct list_head cancel_list;
2257
2258         INIT_LIST_HEAD(&cancel_list);
2259
2260         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2261         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2262                                  &mad_agent_priv->send_list, agent_list) {
2263                 if (mad_send_wr->status == IB_WC_SUCCESS) {
2264                         mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2265                         mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2266                 }
2267         }
2268
2269         /* Empty wait list to prevent receives from finding a request */
2270         list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2271         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2272
2273         /* Report all cancelled requests */
2274         mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2275         mad_send_wc.vendor_err = 0;
2276
2277         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2278                                  &cancel_list, agent_list) {
2279                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2280                 list_del(&mad_send_wr->agent_list);
2281                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2282                                                    &mad_send_wc);
2283                 atomic_dec(&mad_agent_priv->refcount);
2284         }
2285 }
2286
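     /* Locate the send WR owning a given send buffer on the wait or send list */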
2287 static struct ib_mad_send_wr_private*
2288 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2289              struct ib_mad_send_buf *send_buf)
2290 {
2291         struct ib_mad_send_wr_private *mad_send_wr;
2292
2293         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2294                             agent_list) {
2295                 if (&mad_send_wr->send_buf == send_buf)
2296                         return mad_send_wr;
2297         }
2298
2299         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2300                             agent_list) {
2301                 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2302                     &mad_send_wr->send_buf == send_buf)
2303                         return mad_send_wr;
2304         }
2305         return NULL;
2306 }
2307
2308 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2309                   struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2310 {
2311         struct ib_mad_agent_private *mad_agent_priv;
2312         struct ib_mad_send_wr_private *mad_send_wr;
2313         unsigned long flags;
2314         int active;
2315
2316         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2317                                       agent);
2318         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2319         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2320         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2321                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2322                 return -EINVAL;
2323         }
2324
2325         active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2326         if (!timeout_ms) {
2327                 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2328                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2329         }
2330
2331         mad_send_wr->send_buf.timeout_ms = timeout_ms;
2332         if (active)
2333                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2334         else
2335                 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2336
2337         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2338         return 0;
2339 }
2340 EXPORT_SYMBOL(ib_modify_mad);
2341
2342 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2343                    struct ib_mad_send_buf *send_buf)
2344 {
2345         ib_modify_mad(mad_agent, send_buf, 0);
2346 }
2347 EXPORT_SYMBOL(ib_cancel_mad);
2348
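     /*
      * Work handler for locally routed (loopback) MADs: deliver the receive
      * side to the destination agent, then report the send completion to
      * the originator.
      */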
2349 static void local_completions(struct work_struct *work)
2350 {
2351         struct ib_mad_agent_private *mad_agent_priv;
2352         struct ib_mad_local_private *local;
2353         struct ib_mad_agent_private *recv_mad_agent;
2354         unsigned long flags;
2355         int free_mad;
2356         struct ib_wc wc;
2357         struct ib_mad_send_wc mad_send_wc;
2358
2359         mad_agent_priv =
2360                 container_of(work, struct ib_mad_agent_private, local_work);
2361
2362         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2363         while (!list_empty(&mad_agent_priv->local_list)) {
2364                 local = list_entry(mad_agent_priv->local_list.next,
2365                                    struct ib_mad_local_private,
2366                                    completion_list);
2367                 list_del(&local->completion_list);
2368                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2369                 free_mad = 0;
2370                 if (local->mad_priv) {
2371                         recv_mad_agent = local->recv_mad_agent;
2372                         if (!recv_mad_agent) {
2373                                 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2374                                 free_mad = 1;
2375                                 goto local_send_completion;
2376                         }
2377
2378                         /*
2379                          * Defined behavior is to complete response
2380                          * before request
2381                          */
2382                         build_smp_wc(recv_mad_agent->agent.qp,
2383                                      (unsigned long) local->mad_send_wr,
2384                                      be16_to_cpu(IB_LID_PERMISSIVE),
2385                                      0, recv_mad_agent->agent.port_num, &wc);
2386
2387                         local->mad_priv->header.recv_wc.wc = &wc;
2388                         local->mad_priv->header.recv_wc.mad_len =
2389                                                 sizeof(struct ib_mad);
2390                         INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2391                         list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2392                                  &local->mad_priv->header.recv_wc.rmpp_list);
2393                         local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2394                         local->mad_priv->header.recv_wc.recv_buf.mad =
2395                                                 &local->mad_priv->mad.mad;
2396                         if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2397                                 snoop_recv(recv_mad_agent->qp_info,
2398                                           &local->mad_priv->header.recv_wc,
2399                                            IB_MAD_SNOOP_RECVS);
2400                         recv_mad_agent->agent.recv_handler(
2401                                                 &recv_mad_agent->agent,
2402                                                 &local->mad_priv->header.recv_wc);
2403                         spin_lock_irqsave(&recv_mad_agent->lock, flags);
2404                         atomic_dec(&recv_mad_agent->refcount);
2405                         spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2406                 }
2407
2408 local_send_completion:
2409                 /* Complete send */
2410                 mad_send_wc.status = IB_WC_SUCCESS;
2411                 mad_send_wc.vendor_err = 0;
2412                 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2413                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2414                         snoop_send(mad_agent_priv->qp_info,
2415                                    &local->mad_send_wr->send_buf,
2416                                    &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2417                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2418                                                    &mad_send_wc);
2419
2420                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2421                 atomic_dec(&mad_agent_priv->refcount);
2422                 if (free_mad)
2423                         kmem_cache_free(ib_mad_cache, local->mad_priv);
2424                 kfree(local);
2425         }
2426         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2427 }
2428
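     /*
      * Retry a timed-out send if retries remain, re-arming its timeout and
      * putting it back on the send list.
      */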
2429 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2430 {
2431         int ret;
2432
2433         if (!mad_send_wr->retries_left)
2434                 return -ETIMEDOUT;
2435
2436         mad_send_wr->retries_left--;
2437         mad_send_wr->send_buf.retries++;
2438
2439         mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2440
2441         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2442                 ret = ib_retry_rmpp(mad_send_wr);
2443                 switch (ret) {
2444                 case IB_RMPP_RESULT_UNHANDLED:
2445                         ret = ib_send_mad(mad_send_wr);
2446                         break;
2447                 case IB_RMPP_RESULT_CONSUMED:
2448                         ret = 0;
2449                         break;
2450                 default:
2451                         ret = -ECOMM;
2452                         break;
2453                 }
2454         } else
2455                 ret = ib_send_mad(mad_send_wr);
2456
2457         if (!ret) {
2458                 mad_send_wr->refcount++;
2459                 list_add_tail(&mad_send_wr->agent_list,
2460                               &mad_send_wr->mad_agent_priv->send_list);
2461         }
2462         return ret;
2463 }
2464
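     /*
      * Delayed-work handler: retry or time out requests whose response
      * deadline has passed, and reschedule for the next pending deadline.
      */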
2465 static void timeout_sends(struct work_struct *work)
2466 {
2467         struct ib_mad_agent_private *mad_agent_priv;
2468         struct ib_mad_send_wr_private *mad_send_wr;
2469         struct ib_mad_send_wc mad_send_wc;
2470         unsigned long flags, delay;
2471
2472         mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2473                                       timed_work.work);
2474         mad_send_wc.vendor_err = 0;
2475
2476         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2477         while (!list_empty(&mad_agent_priv->wait_list)) {
2478                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2479                                          struct ib_mad_send_wr_private,
2480                                          agent_list);
2481
2482                 if (time_after(mad_send_wr->timeout, jiffies)) {
2483                         delay = mad_send_wr->timeout - jiffies;
2484                         if ((long)delay <= 0)
2485                                 delay = 1;
2486                         queue_delayed_work(mad_agent_priv->qp_info->
2487                                            port_priv->wq,
2488                                            &mad_agent_priv->timed_work, delay);
2489                         break;
2490                 }
2491
2492                 list_del(&mad_send_wr->agent_list);
2493                 if (mad_send_wr->status == IB_WC_SUCCESS &&
2494                     !retry_send(mad_send_wr))
2495                         continue;
2496
2497                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2498
2499                 if (mad_send_wr->status == IB_WC_SUCCESS)
2500                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2501                 else
2502                         mad_send_wc.status = mad_send_wr->status;
2503                 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2504                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2505                                                    &mad_send_wc);
2506
2507                 atomic_dec(&mad_agent_priv->refcount);
2508                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2509         }
2510         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2511 }
2512
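     /*
      * CQ completion callback: queue the port's completion work item if the
      * port is still on the port list.
      */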
2513 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2514 {
2515         struct ib_mad_port_private *port_priv = cq->cq_context;
2516         unsigned long flags;
2517
2518         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2519         if (!list_empty(&port_priv->port_list))
2520                 queue_work(port_priv->wq, &port_priv->work);
2521         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2522 }
2523
2524 /*
2525  * Allocate receive MADs and post receive WRs for them
2526  */
2527 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2528                                     struct ib_mad_private *mad)
2529 {
2530         unsigned long flags;
2531         int post, ret;
2532         struct ib_mad_private *mad_priv;
2533         struct ib_sge sg_list;
2534         struct ib_recv_wr recv_wr, *bad_recv_wr;
2535         struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2536
2537         /* Initialize common scatter list fields */
2538         sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2539         sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2540
2541         /* Initialize common receive WR fields */
2542         recv_wr.next = NULL;
2543         recv_wr.sg_list = &sg_list;
2544         recv_wr.num_sge = 1;
2545
2546         do {
2547                 /* Allocate and map receive buffer */
2548                 if (mad) {
2549                         mad_priv = mad;
2550                         mad = NULL;
2551                 } else {
2552                         mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
2553                         if (!mad_priv) {
2554                                 printk(KERN_ERR PFX "No memory for receive buffer\n");
2555                                 ret = -ENOMEM;
2556                                 break;
2557                         }
2558                 }
2559                 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2560                                                  &mad_priv->grh,
2561                                                  sizeof *mad_priv -
2562                                                    sizeof mad_priv->header,
2563                                                  DMA_FROM_DEVICE);
2564                 mad_priv->header.mapping = sg_list.addr;
2565                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2566                 mad_priv->header.mad_list.mad_queue = recv_queue;
2567
2568                 /* Post receive WR */
2569                 spin_lock_irqsave(&recv_queue->lock, flags);
2570                 post = (++recv_queue->count < recv_queue->max_active);
2571                 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2572                 spin_unlock_irqrestore(&recv_queue->lock, flags);
2573                 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2574                 if (ret) {
2575                         spin_lock_irqsave(&recv_queue->lock, flags);
2576                         list_del(&mad_priv->header.mad_list.list);
2577                         recv_queue->count--;
2578                         spin_unlock_irqrestore(&recv_queue->lock, flags);
2579                         ib_dma_unmap_single(qp_info->port_priv->device,
2580                                             mad_priv->header.mapping,
2581                                             sizeof *mad_priv -
2582                                               sizeof mad_priv->header,
2583                                             DMA_FROM_DEVICE);
2584                         kmem_cache_free(ib_mad_cache, mad_priv);
2585                         printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
2586                         break;
2587                 }
2588         } while (post);
2589
2590         return ret;
2591 }
2592
2593 /*
2594  * Remove, unmap, and free all receive MADs still on the posted list
2595  */
2596 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2597 {
2598         struct ib_mad_private_header *mad_priv_hdr;
2599         struct ib_mad_private *recv;
2600         struct ib_mad_list_head *mad_list;
2601
2602         while (!list_empty(&qp_info->recv_queue.list)) {
2603
2604                 mad_list = list_entry(qp_info->recv_queue.list.next,
2605                                       struct ib_mad_list_head, list);
2606                 mad_priv_hdr = container_of(mad_list,
2607                                             struct ib_mad_private_header,
2608                                             mad_list);
2609                 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2610                                     header);
2611
2612                 /* Remove from posted receive MAD list */
2613                 list_del(&mad_list->list);
2614
2615                 ib_dma_unmap_single(qp_info->port_priv->device,
2616                                     recv->header.mapping,
2617                                     sizeof(struct ib_mad_private) -
2618                                       sizeof(struct ib_mad_private_header),
2619                                     DMA_FROM_DEVICE);
2620                 kmem_cache_free(ib_mad_cache, recv);
2621         }
2622
2623         qp_info->recv_queue.count = 0;
2624 }
2625
2626 /*
2627  * Start the port
2628  */
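     /*
      * Move both MAD QPs through the INIT -> RTR -> RTS transitions,
      * request CQ completion notification, and fill the receive queues.
      */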
2629 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2630 {
2631         int ret, i;
2632         struct ib_qp_attr *attr;
2633         struct ib_qp *qp;
2634
2635         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2636         if (!attr) {
2637                 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2638                 return -ENOMEM;
2639         }
2640
2641         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2642                 qp = port_priv->qp_info[i].qp;
2643                 /*
2644                  * PKey index for QP1 is irrelevant but
2645                  * one is needed for the Reset to Init transition
2646                  */
2647                 attr->qp_state = IB_QPS_INIT;
2648                 attr->pkey_index = 0;
2649                 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2650                 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2651                                              IB_QP_PKEY_INDEX | IB_QP_QKEY);
2652                 if (ret) {
2653                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2654                                "INIT: %d\n", i, ret);
2655                         goto out;
2656                 }
2657
2658                 attr->qp_state = IB_QPS_RTR;
2659                 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2660                 if (ret) {
2661                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2662                                "RTR: %d\n", i, ret);
2663                         goto out;
2664                 }
2665
2666                 attr->qp_state = IB_QPS_RTS;
2667                 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2668                 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2669                 if (ret) {
2670                         printk(KERN_ERR PFX "Couldn't change QP%d state to "
2671                                "RTS: %d\n", i, ret);
2672                         goto out;
2673                 }
2674         }
2675
2676         ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2677         if (ret) {
2678                 printk(KERN_ERR PFX "Failed to request completion "
2679                        "notification: %d\n", ret);
2680                 goto out;
2681         }
2682
2683         for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2684                 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2685                 if (ret) {
2686                         printk(KERN_ERR PFX "Couldn't post receive WRs\n");
2687                         goto out;
2688                 }
2689         }
2690 out:
2691         kfree(attr);
2692         return ret;
2693 }
2694
2695 static void qp_event_handler(struct ib_event *event, void *qp_context)
2696 {
2697         struct ib_mad_qp_info   *qp_info = qp_context;
2698
2699         /* It's worse than that! He's dead, Jim! */
2700         printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
2701                 event->event, qp_info->qp->qp_num);
2702 }
2703
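     /* Initialize the bookkeeping for one MAD work-request queue */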
2704 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2705                            struct ib_mad_queue *mad_queue)
2706 {
2707         mad_queue->qp_info = qp_info;
2708         mad_queue->count = 0;
2709         spin_lock_init(&mad_queue->lock);
2710         INIT_LIST_HEAD(&mad_queue->list);
2711 }
2712
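     /* Initialize the per-QP info: send/receive queues and snoop state */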
2713 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2714                         struct ib_mad_qp_info *qp_info)
2715 {
2716         qp_info->port_priv = port_priv;
2717         init_mad_queue(qp_info, &qp_info->send_queue);
2718         init_mad_queue(qp_info, &qp_info->recv_queue);
2719         INIT_LIST_HEAD(&qp_info->overflow_list);
2720         spin_lock_init(&qp_info->snoop_lock);
2721         qp_info->snoop_table = NULL;
2722         qp_info->snoop_table_size = 0;
2723         atomic_set(&qp_info->snoop_count, 0);
2724 }
2725
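     /*
      * Create one of the special MAD QPs (SMI for QP0, GSI for QP1);
      * both QPs share the port's single CQ.
      */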
2726 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2727                          enum ib_qp_type qp_type)
2728 {
2729         struct ib_qp_init_attr  qp_init_attr;
2730         int ret;
2731
2732         memset(&qp_init_attr, 0, sizeof qp_init_attr);
2733         qp_init_attr.send_cq = qp_info->port_priv->cq;
2734         qp_init_attr.recv_cq = qp_info->port_priv->cq;
2735         qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2736         qp_init_attr.cap.max_send_wr = mad_sendq_size;
2737         qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2738         qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2739         qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2740         qp_init_attr.qp_type = qp_type;
2741         qp_init_attr.port_num = qp_info->port_priv->port_num;
2742         qp_init_attr.qp_context = qp_info;
2743         qp_init_attr.event_handler = qp_event_handler;
2744         qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2745         if (IS_ERR(qp_info->qp)) {
2746                 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
2747                        get_spl_qp_index(qp_type));
2748                 ret = PTR_ERR(qp_info->qp);
2749                 goto error;
2750         }
2751         /* Use minimum queue sizes unless the CQ is resized */
2752         qp_info->send_queue.max_active = mad_sendq_size;
2753         qp_info->recv_queue.max_active = mad_recvq_size;
2754         return 0;
2755
2756 error:
2757         return ret;
2758 }
2759
2760 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2761 {
2762         ib_destroy_qp(qp_info->qp);
2763         kfree(qp_info->snoop_table);
2764 }
2765
2766 /*
2767  * Open the port
2768  * Create the CQ, PD, MR, both MAD QPs, and the completion workqueue
2769  */
2770 static int ib_mad_port_open(struct ib_device *device,
2771                             int port_num)
2772 {
2773         int ret, cq_size;
2774         struct ib_mad_port_private *port_priv;
2775         unsigned long flags;
2776         char name[sizeof "ib_mad123"];
2777
2778         /* Create new device info */
2779         port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2780         if (!port_priv) {
2781                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
2782                 return -ENOMEM;
2783         }
2784
2785         port_priv->device = device;
2786         port_priv->port_num = port_num;
2787         spin_lock_init(&port_priv->reg_lock);
2788         INIT_LIST_HEAD(&port_priv->agent_list);
2789         init_mad_qp(port_priv, &port_priv->qp_info[0]);
2790         init_mad_qp(port_priv, &port_priv->qp_info[1]);
2791
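             /*
              * One CQ serves the send and receive queues of both MAD QPs,
              * hence (send + recv) * 2 entries
              */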
2792         cq_size = (mad_sendq_size + mad_recvq_size) * 2;
2793         port_priv->cq = ib_create_cq(port_priv->device,
2794                                      ib_mad_thread_completion_handler,
2795                                      NULL, port_priv, cq_size, 0);
2796         if (IS_ERR(port_priv->cq)) {
2797                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
2798                 ret = PTR_ERR(port_priv->cq);
2799                 goto error3;
2800         }
2801
2802         port_priv->pd = ib_alloc_pd(device);
2803         if (IS_ERR(port_priv->pd)) {
2804                 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
2805                 ret = PTR_ERR(port_priv->pd);
2806                 goto error4;
2807         }
2808
2809         port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2810         if (IS_ERR(port_priv->mr)) {
2811                 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
2812                 ret = PTR_ERR(port_priv->mr);
2813                 goto error5;
2814         }
2815
2816         ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2817         if (ret)
2818                 goto error6;
2819         ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2820         if (ret)
2821                 goto error7;
2822
2823         snprintf(name, sizeof name, "ib_mad%d", port_num);
2824         port_priv->wq = create_singlethread_workqueue(name);
2825         if (!port_priv->wq) {
2826                 ret = -ENOMEM;
2827                 goto error8;
2828         }
2829         INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2830
2831         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2832         list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2833         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2834
2835         ret = ib_mad_port_start(port_priv);
2836         if (ret) {
2837                 printk(KERN_ERR PFX "Couldn't start port\n");
2838                 goto error9;
2839         }
2840
2841         return 0;
2842
2843 error9:
2844         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2845         list_del_init(&port_priv->port_list);
2846         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2847
2848         destroy_workqueue(port_priv->wq);
2849 error8:
2850         destroy_mad_qp(&port_priv->qp_info[1]);
2851 error7:
2852         destroy_mad_qp(&port_priv->qp_info[0]);
2853 error6:
2854         ib_dereg_mr(port_priv->mr);
2855 error5:
2856         ib_dealloc_pd(port_priv->pd);
2857 error4:
2858         ib_destroy_cq(port_priv->cq);
2859         cleanup_recv_queue(&port_priv->qp_info[1]);
2860         cleanup_recv_queue(&port_priv->qp_info[0]);
2861 error3:
2862         kfree(port_priv);
2863
2864         return ret;
2865 }
2866
2867 /*
2868  * Close the port
2869  * Remove the port from the port list, then free the port
2870  * resources (workqueue, QPs, MR, PD, CQ) and its info structure
2871  */
2872 static int ib_mad_port_close(struct ib_device *device, int port_num)
2873 {
2874         struct ib_mad_port_private *port_priv;
2875         unsigned long flags;
2876
2877         spin_lock_irqsave(&ib_mad_port_list_lock, flags);
2878         port_priv = __ib_get_mad_port(device, port_num);
2879         if (port_priv == NULL) {
2880                 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2881                 printk(KERN_ERR PFX "Port %d not found\n", port_num);
2882                 return -ENODEV;
2883         }
2884         list_del_init(&port_priv->port_list);
2885         spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2886
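             /*
              * The port is off the port list, so no new completion work can
              * be queued; destroy the workqueue (which flushes pending work)
              * before tearing down the QPs and CQ.
              */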
2887         destroy_workqueue(port_priv->wq);
2888         destroy_mad_qp(&port_priv->qp_info[1]);
2889         destroy_mad_qp(&port_priv->qp_info[0]);
2890         ib_dereg_mr(port_priv->mr);
2891         ib_dealloc_pd(port_priv->pd);
2892         ib_destroy_cq(port_priv->cq);
2893         cleanup_recv_queue(&port_priv->qp_info[1]);
2894         cleanup_recv_queue(&port_priv->qp_info[0]);
2895         /* XXX: Handle deallocation of MAD registration tables */
2896
2897         kfree(port_priv);
2898
2899         return 0;
2900 }
2901
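     /*
      * Client "add" callback: open a MAD port (and its agent port) for
      * every physical port.  A switch is managed only through port 0;
      * CAs and routers use ports 1..phys_port_cnt.
      */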
2902 static void ib_mad_init_device(struct ib_device *device)
2903 {
2904         int start, end, i;
2905
2906         if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2907                 return;
2908
2909         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2910                 start = 0;
2911                 end   = 0;
2912         } else {
2913                 start = 1;
2914                 end   = device->phys_port_cnt;
2915         }
2916
2917         for (i = start; i <= end; i++) {
2918                 if (ib_mad_port_open(device, i)) {
2919                         printk(KERN_ERR PFX "Couldn't open %s port %d\n",
2920                                device->name, i);
2921                         goto error;
2922                 }
2923                 if (ib_agent_port_open(device, i)) {
2924                         printk(KERN_ERR PFX "Couldn't open %s port %d "
2925                                "for agents\n",
2926                                device->name, i);
2927                         goto error_agent;
2928                 }
2929         }
2930         return;
2931
2932 error_agent:
2933         if (ib_mad_port_close(device, i))
2934                 printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2935                        device->name, i);
2936
2937 error:
2938         i--;
2939
2940         while (i >= start) {
2941                 if (ib_agent_port_close(device, i))
2942                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2943                                "for agents\n",
2944                                device->name, i);
2945                 if (ib_mad_port_close(device, i))
2946                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2947                                device->name, i);
2948                 i--;
2949         }
2950 }
2951
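     /* Client "remove" callback: close the agent and MAD ports opened above */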
2952 static void ib_mad_remove_device(struct ib_device *device)
2953 {
2954         int i, num_ports, cur_port;
2955
2956         if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2957                 return;
2958
2959         if (device->node_type == RDMA_NODE_IB_SWITCH) {
2960                 num_ports = 1;
2961                 cur_port = 0;
2962         } else {
2963                 num_ports = device->phys_port_cnt;
2964                 cur_port = 1;
2965         }
2966         for (i = 0; i < num_ports; i++, cur_port++) {
2967                 if (ib_agent_port_close(device, cur_port))
2968                         printk(KERN_ERR PFX "Couldn't close %s port %d "
2969                                "for agents\n",
2970                                device->name, cur_port);
2971                 if (ib_mad_port_close(device, cur_port))
2972                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
2973                                device->name, cur_port);
2974         }
2975 }
2976
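     /*
      * Registering this client makes the IB core call ib_mad_init_device()
      * and ib_mad_remove_device() for each IB device as it is added or removed.
      */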
2977 static struct ib_client mad_client = {
2978         .name   = "mad",
2979         .add = ib_mad_init_device,
2980         .remove = ib_mad_remove_device
2981 };
2982
2983 static int __init ib_mad_init_module(void)
2984 {
2985         int ret;
2986
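             /* Clamp the queue-size module parameters to the supported range */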
2987         mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
2988         mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
2989
2990         mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
2991         mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
2992
2993         ib_mad_cache = kmem_cache_create("ib_mad",
2994                                          sizeof(struct ib_mad_private),
2995                                          0,
2996                                          SLAB_HWCACHE_ALIGN,
2997                                          NULL);
2998         if (!ib_mad_cache) {
2999                 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
3000                 ret = -ENOMEM;
3001                 goto error1;
3002         }
3003
3004         INIT_LIST_HEAD(&ib_mad_port_list);
3005
3006         if (ib_register_client(&mad_client)) {
3007                 printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
3008                 ret = -EINVAL;
3009                 goto error2;
3010         }
3011
3012         return 0;
3013
3014 error2:
3015         kmem_cache_destroy(ib_mad_cache);
3016 error1:
3017         return ret;
3018 }
3019
3020 static void __exit ib_mad_cleanup_module(void)
3021 {
3022         ib_unregister_client(&mad_client);
3023         kmem_cache_destroy(ib_mad_cache);
3024 }
3025
3026 module_init(ib_mad_init_module);
3027 module_exit(ib_mad_cleanup_module);