net: igmp: fix source address check for IGMPv3 reports
[pandora-kernel.git] / net/core/pktgen.c
index e35a6fb..7879b2f 100644
@@ -568,7 +568,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
                           "     dst_min: %s  dst_max: %s\n",
                           pkt_dev->dst_min, pkt_dev->dst_max);
                seq_printf(seq,
-                          "        src_min: %s  src_max: %s\n",
+                          "     src_min: %s  src_max: %s\n",
                           pkt_dev->src_min, pkt_dev->src_max);
        }
 
@@ -1803,10 +1803,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                        return -EFAULT;
                i += len;
                mutex_lock(&pktgen_thread_lock);
-               pktgen_add_device(t, f);
+               ret = pktgen_add_device(t, f);
                mutex_unlock(&pktgen_thread_lock);
-               ret = count;
-               sprintf(pg_result, "OK: add_device=%s", f);
+               if (!ret) {
+                       ret = count;
+                       sprintf(pg_result, "OK: add_device=%s", f);
+               } else
+                       sprintf(pg_result, "ERROR: can not add device %s", f);
                goto out;
        }
 
@@ -1932,7 +1935,7 @@ static int pktgen_device_event(struct notifier_block *unused,
 {
        struct net_device *dev = ptr;
 
-       if (!net_eq(dev_net(dev), &init_net))
+       if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
                return NOTIFY_DONE;
 
        /* It is OK that we do not hold the group lock right now,
@@ -2145,9 +2148,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
        }
 
        start_time = ktime_now();
-       if (remaining < 100000)
-               ndelay(remaining);      /* really small just spin */
-       else {
+       if (remaining < 100000) {
+               /* for small delays (<100us), just loop until limit is reached */
+               do {
+                       end_time = ktime_now();
+               } while (ktime_lt(end_time, spin_until));
+       } else {
                /* see do_nanosleep */
                hrtimer_init_sleeper(&t, current);
                do {
@@ -2162,8 +2168,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
                        hrtimer_cancel(&t.timer);
                } while (t.task && pkt_dev->running && !signal_pending(current));
                __set_current_state(TASK_RUNNING);
+               end_time = ktime_now();
        }
-       end_time = ktime_now();
 
        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
        pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
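
The spin() change above stops using ndelay() for sub-100 us waits and instead polls the clock until the deadline passes, which keeps the wait on the same time base used to compute spin_until. Roughly the same busy-wait pattern, as a standalone userspace sketch (CLOCK_MONOTONIC and the helper names below are illustrative, not pktgen code):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Monotonic clock in nanoseconds. */
    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    /* Busy-wait until the deadline: keep re-reading the clock instead of
     * sleeping, which only makes sense for very short waits (< 100 us in
     * the hunk above). */
    static void spin_until(uint64_t deadline_ns)
    {
        while (now_ns() < deadline_ns)
            ;   /* nothing to do but poll again */
    }

    int main(void)
    {
        uint64_t start = now_ns();

        spin_until(start + 50 * 1000);  /* 50 us */
        printf("spun for %llu ns\n", (unsigned long long)(now_ns() - start));
        return 0;
    }

For waits this short, actually sleeping would cost more in wakeup latency than it saves, which is why both the old ndelay() path and the new loop stay on the CPU.
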
@@ -2518,6 +2524,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
                if (x) {
                        int ret;
                        __u8 *eth;
+                       struct iphdr *iph;
+
                        nhead = x->props.header_len - skb_headroom(skb);
                        if (nhead > 0) {
                                ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2539,6 +2547,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
                        eth = (__u8 *) skb_push(skb, ETH_HLEN);
                        memcpy(eth, pkt_dev->hh, 12);
                        *(u16 *) &eth[12] = protocol;
+
+                       /* Update IPv4 header len as well as checksum value */
+                       iph = ip_hdr(skb);
+                       iph->tot_len = htons(skb->len - ETH_HLEN);
+                       ip_send_check(iph);
                }
        }
        return 1;
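
The process_ipsec() hunks re-stamp the IPv4 total length and recompute the header checksum with ip_send_check() once the transform has grown the packet, since the original values no longer match. For reference, that checksum is the standard RFC 1071 16-bit one's-complement sum over the header with the check field zeroed; a minimal userspace sketch of the calculation (the struct layout and names below are illustrative, not kernel code):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal IPv4 header layout for illustration (20 bytes, no options). */
    struct ipv4_hdr {
        uint8_t  ver_ihl;
        uint8_t  tos;
        uint16_t tot_len;       /* big-endian on the wire */
        uint16_t id;
        uint16_t frag_off;
        uint8_t  ttl;
        uint8_t  protocol;
        uint16_t check;
        uint32_t saddr;
        uint32_t daddr;
    };

    /* 16-bit one's-complement sum over the header, computed with the
     * check field zeroed, then folded and inverted. */
    static uint16_t ip_header_checksum(const void *hdr, size_t len)
    {
        const uint16_t *p = hdr;
        uint32_t sum = 0;

        for (; len > 1; len -= 2)
            sum += *p++;
        if (len)                        /* odd trailing byte, if any */
            sum += *(const uint8_t *)p;
        while (sum >> 16)               /* fold carries into the low 16 bits */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        struct ipv4_hdr iph;

        memset(&iph, 0, sizeof(iph));
        iph.ver_ihl  = 0x45;                    /* IPv4, 5 * 4-byte header */
        iph.ttl      = 64;
        iph.protocol = 17;                      /* UDP */
        iph.tot_len  = htons(1500);             /* must match the final length */
        iph.saddr    = htonl(0xc0a80001);       /* 192.168.0.1 */
        iph.daddr    = htonl(0xc0a80002);       /* 192.168.0.2 */

        iph.check = 0;                          /* zero before recomputing */
        iph.check = ip_header_checksum(&iph, sizeof(iph));
        printf("header checksum: 0x%04x\n", iph.check);
        return 0;
    }
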
@@ -2602,18 +2615,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
                                if (!pkt_dev->page)
                                        break;
                        }
-                       skb_shinfo(skb)->frags[i].page = pkt_dev->page;
                        get_page(pkt_dev->page);
+                       skb_frag_set_page(skb, i, pkt_dev->page);
                        skb_shinfo(skb)->frags[i].page_offset = 0;
                        /*last fragment, fill rest of data*/
                        if (i == (frags - 1))
-                               skb_shinfo(skb)->frags[i].size =
-                                   (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+                               skb_frag_size_set(&skb_shinfo(skb)->frags[i],
+                                   (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
                        else
-                               skb_shinfo(skb)->frags[i].size = frag_len;
-                       datalen -= skb_shinfo(skb)->frags[i].size;
-                       skb->len += skb_shinfo(skb)->frags[i].size;
-                       skb->data_len += skb_shinfo(skb)->frags[i].size;
+                               skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
+                       datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+                       skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+                       skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
                        i++;
                        skb_shinfo(skb)->nr_frags = i;
                }
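
The fragment hunk replaces direct writes to skb_shinfo(skb)->frags[i].size with the skb_frag_size()/skb_frag_size_set() helpers, so callers stop depending on the field's exact representation. A toy sketch of the same accessor idea in plain C (not the kernel API, just the pattern):

    #include <stdio.h>

    /* Callers go through small accessors so the representation of 'size'
     * can change later without touching every user of the struct. */
    struct frag {
        unsigned int size;
    };

    static inline unsigned int frag_size(const struct frag *f)
    {
        return f->size;
    }

    static inline void frag_size_set(struct frag *f, unsigned int size)
    {
        f->size = size;
    }

    int main(void)
    {
        struct frag f;

        frag_size_set(&f, 4096);
        printf("frag size: %u\n", frag_size(&f));
        return 0;
    }
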
@@ -2932,7 +2945,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
                  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
                  pkt_dev->pkt_overhead;
 
-       if (datalen < sizeof(struct pktgen_hdr)) {
+       if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
                datalen = sizeof(struct pktgen_hdr);
                if (net_ratelimit())
                        pr_info("increased datalen to %d\n", datalen);
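
The datalen fix above adds an explicit negative check because comparing a signed int against sizeof() converts the int to an unsigned type; a negative length then looks enormous and slips past the minimum-size test. A small standalone demonstration of that promotion pitfall (the struct and variable names are made up for illustration):

    #include <stdio.h>

    struct pkt_hdr {        /* stand-in for struct pktgen_hdr; size is illustrative */
        unsigned int seq;
        unsigned int ts;
    };

    int main(void)
    {
        int datalen = -20;  /* e.g. payload budget smaller than the headers */

        /* datalen is converted to the unsigned type of sizeof here, so -20
         * becomes a huge value and the "too small" branch is NOT taken.
         * Compilers flag this with -Wsign-compare. */
        if (datalen < sizeof(struct pkt_hdr))
            printf("unsigned-only check: too small\n");
        else
            printf("unsigned-only check: looks big enough (wrong)\n");

        /* The patched check tests for negative values first. */
        if (datalen < 0 || (size_t)datalen < sizeof(struct pkt_hdr))
            printf("fixed check: too small, clamp to sizeof(struct pkt_hdr)\n");
        return 0;
    }
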
@@ -3468,8 +3481,10 @@ static int pktgen_thread_worker(void *arg)
        pktgen_rem_thread(t);
 
        /* Wait for kthread_stop */
-       while (!kthread_should_stop()) {
+       for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
+               if (kthread_should_stop())
+                       break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
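
The worker-exit hunk reorders the loop so the task state is set to TASK_INTERRUPTIBLE before kthread_should_stop() is tested; otherwise kthread_stop() can set the flag and issue its wakeup between the test and the state change, and the thread then sleeps with no further wakeup coming. A rough userspace analogue of the same lost-wakeup discipline, where the stop flag is re-checked under the lock that the wait releases atomically (plain pthreads, names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool should_stop;

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!should_stop)                     /* re-check the flag before sleeping */
            pthread_cond_wait(&cond, &lock);     /* drops the lock and blocks atomically */
        pthread_mutex_unlock(&lock);
        printf("worker: stop requested, exiting\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        sleep(1);

        pthread_mutex_lock(&lock);
        should_stop = true;             /* publish the flag under the lock... */
        pthread_cond_signal(&cond);     /* ...then wake the sleeper */
        pthread_mutex_unlock(&lock);

        pthread_join(tid, NULL);
        return 0;
    }

There is no mutex in the kernel loop; setting TASK_INTERRUPTIBLE before the test plays the same role that checking the predicate under the lock plays here.
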
@@ -3755,12 +3770,18 @@ static void __exit pg_cleanup(void)
 {
        struct pktgen_thread *t;
        struct list_head *q, *n;
+       LIST_HEAD(list);
 
        /* Stop all interfaces & threads */
        pktgen_exiting = true;
 
-       list_for_each_safe(q, n, &pktgen_threads) {
+       mutex_lock(&pktgen_thread_lock);
+       list_splice_init(&pktgen_threads, &list);
+       mutex_unlock(&pktgen_thread_lock);
+
+       list_for_each_safe(q, n, &list) {
                t = list_entry(q, struct pktgen_thread, th_list);
+               list_del(&t->th_list);
                kthread_stop(t->tsk);
                kfree(t);
        }
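
The pg_cleanup() hunk splices the global thread list onto a private list while holding pktgen_thread_lock, then stops and frees each entry with the mutex dropped, so the sleeping kthread_stop() call never runs under the lock. A compact userspace sketch of the same splice-under-lock, tear-down-outside-the-lock pattern (singly linked list and names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int id;
        struct node *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *global_list;    /* shared list, protected by list_lock */

    static void add_node(int id)
    {
        struct node *n = malloc(sizeof(*n));

        if (!n)
            return;
        n->id = id;
        pthread_mutex_lock(&list_lock);
        n->next = global_list;
        global_list = n;
        pthread_mutex_unlock(&list_lock);
    }

    static void cleanup(void)
    {
        struct node *local, *n;

        /* Detach the whole list while holding the lock... */
        pthread_mutex_lock(&list_lock);
        local = global_list;
        global_list = NULL;
        pthread_mutex_unlock(&list_lock);

        /* ...then do the slow teardown (kthread_stop() in the patch, just a
         * printf() and free() here) with the lock dropped. */
        while (local) {
            n = local;
            local = local->next;
            printf("stopping %d\n", n->id);
            free(n);
        }
    }

    int main(void)
    {
        add_node(1);
        add_node(2);
        cleanup();
        return 0;
    }
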