Merge branch 'nfs-for-2.6.32'
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 4a579b3..8d82ba1 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -34,7 +34,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/random.h>
 
@@ -74,11 +73,20 @@ struct mcast_device {
 };
 
 enum mcast_state {
-       MCAST_IDLE,
        MCAST_JOINING,
        MCAST_MEMBER,
+       MCAST_ERROR,
+};
+
+enum mcast_group_state {
+       MCAST_IDLE,
        MCAST_BUSY,
-       MCAST_ERROR
+       MCAST_GROUP_ERROR,
+       MCAST_PKEY_EVENT
+};
+
+enum {
+       MCAST_INVALID_PKEY_INDEX = 0xFFFF
 };
 
 struct mcast_member;
@@ -94,9 +102,12 @@ struct mcast_group {
        struct mcast_member     *last_join;
        int                     members[3];
        atomic_t                refcount;
-       enum mcast_state        state;
+       enum mcast_group_state  state;
        struct ib_sa_query      *query;
        int                     query_id;
+       u16                     pkey_index;
+       u8                      leave_state;
+       int                     retries;
 };
 
 struct mcast_member {
@@ -197,7 +208,7 @@ static void queue_join(struct mcast_member *member)
        unsigned long flags;
 
        spin_lock_irqsave(&group->lock, flags);
-       list_add(&member->list, &group->pending_list);
+       list_add_tail(&member->list, &group->pending_list);
        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                atomic_inc(&group->refcount);
@@ -341,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
 
        rec = group->rec;
        rec.join_state = leave_state;
+       group->leave_state = leave_state;
 
        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -379,9 +391,19 @@ static int fail_join(struct mcast_group *group, struct mcast_member *member,
 static void process_group_error(struct mcast_group *group)
 {
        struct mcast_member *member;
-       int ret;
+       int ret = 0;
+       u16 pkey_index;
+
+       if (group->state == MCAST_PKEY_EVENT)
+               ret = ib_find_pkey(group->port->dev->device,
+                                  group->port->port_num,
+                                  be16_to_cpu(group->rec.pkey), &pkey_index);
 
        spin_lock_irq(&group->lock);
+       if (group->state == MCAST_PKEY_EVENT && !ret &&
+           group->pkey_index == pkey_index)
+               goto out;
+
        while (!list_empty(&group->active_list)) {
                member = list_entry(group->active_list.next,
                                    struct mcast_member, list);
@@ -400,6 +422,7 @@ static void process_group_error(struct mcast_group *group)
        }
 
        group->rec.join_state = 0;
+out:
        group->state = MCAST_BUSY;
        spin_unlock_irq(&group->lock);
 }
@@ -416,9 +439,9 @@ static void mcast_work_handler(struct work_struct *work)
 retest:
        spin_lock_irq(&group->lock);
        while (!list_empty(&group->pending_list) ||
-              (group->state == MCAST_ERROR)) {
+              (group->state != MCAST_BUSY)) {
 
-               if (group->state == MCAST_ERROR) {
+               if (group->state != MCAST_BUSY) {
                        spin_unlock_irq(&group->lock);
                        process_group_error(group);
                        goto retest;
@@ -495,12 +518,19 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context)
 {
        struct mcast_group *group = context;
+       u16 pkey_index = MCAST_INVALID_PKEY_INDEX;
 
        if (status)
                process_join_error(group, status);
        else {
+               ib_find_pkey(group->port->dev->device, group->port->port_num,
+                            be16_to_cpu(rec->pkey), &pkey_index);
+
                spin_lock_irq(&group->port->lock);
                group->rec = *rec;
+               if (group->state == MCAST_BUSY &&
+                   group->pkey_index == MCAST_INVALID_PKEY_INDEX)
+                       group->pkey_index = pkey_index;
                if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
                        rb_erase(&group->node, &group->port->table);
                        mcast_insert(group->port, group, 1);
@@ -515,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
 {
        struct mcast_group *group = context;
 
-       mcast_work_handler(&group->work);
+       if (status && group->retries > 0 &&
+           !send_leave(group, group->leave_state))
+               group->retries--;
+       else
+               mcast_work_handler(&group->work);
 }
 
 static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -538,8 +572,10 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
        if (!group)
                return NULL;
 
+       group->retries = 3;
        group->port = port;
        group->rec.mgid = *mgid;
+       group->pkey_index = MCAST_INVALID_PKEY_INDEX;
        INIT_LIST_HEAD(&group->pending_list);
        INIT_LIST_HEAD(&group->active_list);
        INIT_WORK(&group->work, mcast_work_handler);
@@ -708,7 +744,8 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
 }
 EXPORT_SYMBOL(ib_init_ah_from_mcmember);
 
-static void mcast_groups_lost(struct mcast_port *port)
+static void mcast_groups_event(struct mcast_port *port,
+                              enum mcast_group_state state)
 {
        struct mcast_group *group;
        struct rb_node *node;
@@ -722,7 +759,8 @@ static void mcast_groups_lost(struct mcast_port *port)
                        atomic_inc(&group->refcount);
                        queue_work(mcast_wq, &group->work);
                }
-               group->state = MCAST_ERROR;
+               if (group->state != MCAST_GROUP_ERROR)
+                       group->state = state;
                spin_unlock(&group->lock);
        }
        spin_unlock_irqrestore(&port->lock, flags);
@@ -732,16 +770,20 @@ static void mcast_event_handler(struct ib_event_handler *handler,
                                struct ib_event *event)
 {
        struct mcast_device *dev;
+       int index;
 
        dev = container_of(handler, struct mcast_device, event_handler);
+       index = event->element.port_num - dev->start_port;
 
        switch (event->event) {
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_SM_CHANGE:
        case IB_EVENT_CLIENT_REREGISTER:
-               mcast_groups_lost(&dev->port[event->element.port_num -
-                                            dev->start_port]);
+               mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
+               break;
+       case IB_EVENT_PKEY_CHANGE:
+               mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
                break;
        default:
                break;