batman-adv: Add missing hardif_free_ref in forw_packet_free
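
add_bcast_packet_to_list() now obtains the primary interface through
primary_if_get_selected(), which takes a reference that the queued
forw_packet keeps in if_incoming. That reference was never released:
forw_packet_free() only freed the skb and the forw_packet itself. Drop
the interface reference in forw_packet_free(), and make
purge_outstanding_packets() free a cancelled packet itself when
cancel_delayed_work_sync() reports that the work item was still
pending, because the send handler that would normally call
forw_packet_free() will never run in that case.

The hunks below also carry two related cleanups that land together in
this file: the hna -> tt naming change (num_hna -> num_tt and friends)
and reference-counted router lookup via orig_node_get_router() paired
with neigh_node_free_ref().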
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d49e54d..76daa46 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -121,7 +121,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
        /* adjust all flags and log packets */
        while (aggregated_packet(buff_pos,
                                 forw_packet->packet_len,
-                                batman_packet->num_hna)) {
+                                batman_packet->num_tt)) {
 
                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
@@ -146,7 +146,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                        hard_iface->net_dev->dev_addr);
 
                buff_pos += sizeof(struct batman_packet) +
-                       (batman_packet->num_hna * ETH_ALEN);
+                       (batman_packet->num_tt * ETH_ALEN);
                packet_num++;
                batman_packet = (struct batman_packet *)
                        (forw_packet->skb->data + buff_pos);
@@ -222,7 +222,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
        struct batman_packet *batman_packet;
 
        new_len = sizeof(struct batman_packet) +
-                       (bat_priv->num_local_hna * ETH_ALEN);
+                       (bat_priv->num_local_tt * ETH_ALEN);
        new_buff = kmalloc(new_len, GFP_ATOMIC);
 
        /* keep old buffer if kmalloc should fail */
@@ -231,7 +231,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
                       sizeof(struct batman_packet));
                batman_packet = (struct batman_packet *)new_buff;
 
-               batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
+               batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
                                new_buff + sizeof(struct batman_packet),
                                new_len - sizeof(struct batman_packet));
 
@@ -244,6 +244,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+       struct hard_iface *primary_if;
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server;
@@ -253,6 +254,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
                return;
 
        vis_server = atomic_read(&bat_priv->vis_mode);
+       primary_if = primary_if_get_selected(bat_priv);
 
        /**
         * the interface gets activated here to avoid race conditions between
@@ -264,9 +266,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;
 
-       /* if local hna has changed and interface is a primary interface */
-       if ((atomic_read(&bat_priv->hna_local_changed)) &&
-           (hard_iface == bat_priv->primary_if))
+       /* if local tt has changed and interface is a primary interface */
+       if ((atomic_read(&bat_priv->tt_local_changed)) &&
+           (hard_iface == primary_if))
                rebuild_batman_packet(bat_priv, hard_iface);
 
        /**
@@ -284,7 +286,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
        else
                batman_packet->flags &= ~VIS_SERVER;
 
-       if ((hard_iface == bat_priv->primary_if) &&
+       if ((hard_iface == primary_if) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
                batman_packet->gw_flags =
                                (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
@@ -299,15 +301,19 @@ void schedule_own_packet(struct hard_iface *hard_iface)
                               hard_iface->packet_buff,
                               hard_iface->packet_len,
                               hard_iface, 1, send_time);
+
+       if (primary_if)
+               hardif_free_ref(primary_if);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
-                            uint8_t directlink, int hna_buff_len,
+                            uint8_t directlink, int tt_buff_len,
                             struct hard_iface *if_incoming)
 {
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+       struct neigh_node *router;
        unsigned char in_tq, in_ttl, tq_avg = 0;
        unsigned long send_time;
 
@@ -316,6 +322,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
                return;
        }
 
+       router = orig_node_get_router(orig_node);
+
        in_tq = batman_packet->tq;
        in_ttl = batman_packet->ttl;
 
@@ -324,20 +332,22 @@ void schedule_forward_packet(struct orig_node *orig_node,
 
        /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
         * of our best tq value */
-       if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
+       if (router && router->tq_avg != 0) {
 
                /* rebroadcast ogm of best ranking neighbor as is */
-               if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
-                       batman_packet->tq = orig_node->router->tq_avg;
+               if (!compare_eth(router->addr, ethhdr->h_source)) {
+                       batman_packet->tq = router->tq_avg;
 
-                       if (orig_node->router->last_ttl)
-                               batman_packet->ttl = orig_node->router->last_ttl
-                                                       - 1;
+                       if (router->last_ttl)
+                               batman_packet->ttl = router->last_ttl - 1;
                }
 
-               tq_avg = orig_node->router->tq_avg;
+               tq_avg = router->tq_avg;
        }
 
+       if (router)
+               neigh_node_free_ref(router);
+
        /* apply hop penalty */
        batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
 
@@ -359,7 +369,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
        send_time = forward_send_time();
        add_bat_packet_to_list(bat_priv,
                               (unsigned char *)batman_packet,
-                              sizeof(struct batman_packet) + hna_buff_len,
+                              sizeof(struct batman_packet) + tt_buff_len,
                               if_incoming, 0, send_time);
 }
 
@@ -367,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet)
 {
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
+       if (forw_packet->if_incoming)
+               hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
 }
 
@@ -388,7 +400,6 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                           send_time);
 }
 
-#define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
 /* add a broadcast packet to the queue and setup timers. broadcast packets
  * are sent multiple times to increase the probability of being received.
  *
@@ -399,6 +410,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  * skb is freed. */
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 {
+       struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
 
@@ -407,7 +419,8 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
                goto out;
        }
 
-       if (!bat_priv->primary_if)
+       primary_if = primary_if_get_selected(bat_priv);
+       if (!primary_if)
                goto out;
 
        forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
@@ -426,7 +439,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
        skb_reset_mac_header(skb);
 
        forw_packet->skb = skb;
-       forw_packet->if_incoming = bat_priv->primary_if;
+       forw_packet->if_incoming = primary_if;
 
        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;
@@ -439,6 +452,8 @@ packet_free:
 out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
 out:
+       if (primary_if)
+               hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
 }
 
@@ -526,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 {
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
+       bool pending;
 
        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
@@ -554,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
-               cancel_delayed_work_sync(&forw_packet->delayed_work);
+               pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+
+               if (pending) {
+                       hlist_del(&forw_packet->list);
+                       forw_packet_free(forw_packet);
+               }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -578,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
                 * send_outstanding_bat_packet() will lock the list to
                 * delete the item from the list
                 */
-               cancel_delayed_work_sync(&forw_packet->delayed_work);
+               pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);
+
+               if (pending) {
+                       hlist_del(&forw_packet->list);
+                       forw_packet_free(forw_packet);
+               }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 }
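
The invariant the fix restores is the usual kernel get/put discipline:
every pointer stashed into a longer-lived structure is paired with a
reference increase, and whoever tears the structure down drops that
reference. Below is a minimal userspace model of the pattern, assuming
C11 atomics; the names model_iface, iface_get, iface_put, packet_queue
and packet_free are illustrative stand-ins for batman-adv's hard_iface,
primary_if_get_selected(), hardif_free_ref(), add_bcast_packet_to_list()
and forw_packet_free(), not the kernel implementation.

/*
 * Sketch only: models the refcount pairing this patch enforces.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct model_iface {
	atomic_int refcount;	/* mirrors hard_iface->refcount */
};

/* take a reference before stashing the pointer anywhere */
static struct model_iface *iface_get(struct model_iface *iface)
{
	atomic_fetch_add(&iface->refcount, 1);
	return iface;
}

/* drop a reference; the last holder frees the object */
static void iface_put(struct model_iface *iface)
{
	if (atomic_fetch_sub(&iface->refcount, 1) == 1)
		free(iface);
}

struct model_forw_packet {
	struct model_iface *if_incoming;
};

/* analogue of add_bcast_packet_to_list(): the queued packet holds a ref */
static struct model_forw_packet *packet_queue(struct model_iface *primary_if)
{
	struct model_forw_packet *fp = malloc(sizeof(*fp));

	if (!fp)
		return NULL;
	fp->if_incoming = iface_get(primary_if);
	return fp;
}

/* analogue of the fixed forw_packet_free(): release the held ref */
static void packet_free(struct model_forw_packet *fp)
{
	if (fp->if_incoming)
		iface_put(fp->if_incoming);
	free(fp);
}

int main(void)
{
	struct model_iface *iface = malloc(sizeof(*iface));
	struct model_forw_packet *fp;

	if (!iface)
		return 1;
	atomic_init(&iface->refcount, 1);	/* caller's own reference */

	fp = packet_queue(iface);		/* refcount: 2 */
	if (fp)
		packet_free(fp);		/* refcount: back to 1 */

	iface_put(iface);			/* refcount hits 0: freed */
	puts("balanced: every get matched by a put");
	return 0;
}

The purge_outstanding_packets() hunks handle the teardown side of the
same invariant: cancel_delayed_work_sync() returns true when it
cancelled a work item that had not started running yet. In that case
the send handler, the code path that normally ends in
forw_packet_free(), will never execute, so the purge path must unlink
the entry with hlist_del() and call forw_packet_free() itself, or both
the packet and its interface reference would leak.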